drivers/net/thunderbolt.c
1 /*
2  * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
3  *
4  * Copyright (C) 2017, Intel Corporation
5  * Authors: Amir Levy <amir.jer.levy@intel.com>
6  *          Michael Jamet <michael.jamet@intel.com>
7  *          Mika Westerberg <mika.westerberg@linux.intel.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13
14 #include <linux/atomic.h>
15 #include <linux/highmem.h>
16 #include <linux/if_vlan.h>
17 #include <linux/jhash.h>
18 #include <linux/module.h>
19 #include <linux/etherdevice.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/sizes.h>
22 #include <linux/thunderbolt.h>
23 #include <linux/uuid.h>
24 #include <linux/workqueue.h>
25
26 #include <net/ip6_checksum.h>
27
28 /* Protocol timeouts in ms */
29 #define TBNET_LOGIN_DELAY       4500
30 #define TBNET_LOGIN_TIMEOUT     500
31 #define TBNET_LOGOUT_TIMEOUT    100
32
33 #define TBNET_RING_SIZE         256
34 #define TBNET_LOCAL_PATH        0xf
35 #define TBNET_LOGIN_RETRIES     60
36 #define TBNET_LOGOUT_RETRIES    5
37 #define TBNET_MATCH_FRAGS_ID    BIT(1)
38 #define TBNET_MAX_MTU           SZ_64K
39 #define TBNET_FRAME_SIZE        SZ_4K
40 #define TBNET_MAX_PAYLOAD_SIZE  \
41         (TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
42 /* Rx packets need to hold space for skb_shared_info */
43 #define TBNET_RX_MAX_SIZE       \
44         (TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
45 #define TBNET_RX_PAGE_ORDER     get_order(TBNET_RX_MAX_SIZE)
46 #define TBNET_RX_PAGE_SIZE      (PAGE_SIZE << TBNET_RX_PAGE_ORDER)
47
48 #define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))
49
50 /**
51  * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
52  * @frame_size: size of the data in the frame
53  * @frame_index: running index on the frames
54  * @frame_id: ID of the frame to match frames to specific packet
55  * @frame_count: how many frames assemble a full packet
56  *
57  * Each data frame passed to the high-speed DMA ring has this header. If
58  * the XDomain network directory announces that %TBNET_MATCH_FRAGS_ID is
59  * supported then @frame_id is filled, otherwise it stays %0.
60  */
61 struct thunderbolt_ip_frame_header {
62         u32 frame_size;
63         u16 frame_index;
64         u16 frame_id;
65         u32 frame_count;
66 };
67
68 enum thunderbolt_ip_frame_pdf {
69         TBIP_PDF_FRAME_START = 1,
70         TBIP_PDF_FRAME_END,
71 };
72
73 enum thunderbolt_ip_type {
74         TBIP_LOGIN,
75         TBIP_LOGIN_RESPONSE,
76         TBIP_LOGOUT,
77         TBIP_STATUS,
78 };
79
80 struct thunderbolt_ip_header {
81         u32 route_hi;
82         u32 route_lo;
83         u32 length_sn;
84         uuid_t uuid;
85         uuid_t initiator_uuid;
86         uuid_t target_uuid;
87         u32 type;
88         u32 command_id;
89 };
90
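/*
 * The length_sn field packs two values: the packet length in 32-bit words
 * (excluding the route_hi, route_lo and length_sn fields themselves) in the
 * low 6 bits, and a 2-bit sequence number in bits 27..28. See
 * tbnet_fill_header().
 */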
91 #define TBIP_HDR_LENGTH_MASK            GENMASK(5, 0)
92 #define TBIP_HDR_SN_MASK                GENMASK(28, 27)
93 #define TBIP_HDR_SN_SHIFT               27
94
95 struct thunderbolt_ip_login {
96         struct thunderbolt_ip_header hdr;
97         u32 proto_version;
98         u32 transmit_path;
99         u32 reserved[4];
100 };
101
102 #define TBIP_LOGIN_PROTO_VERSION        1
103
104 struct thunderbolt_ip_login_response {
105         struct thunderbolt_ip_header hdr;
106         u32 status;
107         u32 receiver_mac[2];
108         u32 receiver_mac_len;
109         u32 reserved[4];
110 };
111
112 struct thunderbolt_ip_logout {
113         struct thunderbolt_ip_header hdr;
114 };
115
116 struct thunderbolt_ip_status {
117         struct thunderbolt_ip_header hdr;
118         u32 status;
119 };
120
121 struct tbnet_stats {
122         u64 tx_packets;
123         u64 rx_packets;
124         u64 tx_bytes;
125         u64 rx_bytes;
126         u64 rx_errors;
127         u64 tx_errors;
128         u64 rx_length_errors;
129         u64 rx_over_errors;
130         u64 rx_crc_errors;
131         u64 rx_missed_errors;
132 };
133
134 struct tbnet_frame {
135         struct net_device *dev;
136         struct page *page;
137         struct ring_frame frame;
138 };
139
140 struct tbnet_ring {
141         struct tbnet_frame frames[TBNET_RING_SIZE];
142         unsigned int cons;
143         unsigned int prod;
144         struct tb_ring *ring;
145 };
146
147 /**
148  * struct tbnet - ThunderboltIP network driver private data
149  * @svc: XDomain service the driver is bound to
150  * @xd: XDomain the service belongs to
151  * @handler: ThunderboltIP configuration protocol handler
152  * @dev: Networking device
153  * @napi: NAPI structure for Rx polling
154  * @stats: Network statistics
155  * @skb: Network packet that is currently processed on Rx path
156  * @command_id: ID used for next configuration protocol packet
157  * @login_sent: ThunderboltIP login message successfully sent
158  * @login_received: ThunderboltIP login message received from the remote
159  *                  host
160  * @transmit_path: HopID the other end needs to use when building the
161  *                 opposite side path.
162  * @connection_lock: Lock serializing access to @login_sent,
163  *                   @login_received and @transmit_path.
164  * @login_retries: Number of login retries currently done
165  * @login_work: Worker to send ThunderboltIP login packets
166  * @connected_work: Worker that finalizes the ThunderboltIP connection
167  *                  setup and enables DMA paths for high speed data
168  *                  transfers
169  * @rx_hdr: Copy of the header of the currently processed Rx frame.
170  *          Used when a network packet consists of multiple Thunderbolt
171  *          frames. In host byte order.
172  * @rx_ring: Software ring holding Rx frames
173  * @frame_id: Frame ID used for the next Tx packet
174  *            (if %TBNET_MATCH_FRAGS_ID is supported by both ends)
175  * @tx_ring: Software ring holding Tx frames
176  */
177 struct tbnet {
178         const struct tb_service *svc;
179         struct tb_xdomain *xd;
180         struct tb_protocol_handler handler;
181         struct net_device *dev;
182         struct napi_struct napi;
183         struct tbnet_stats stats;
184         struct sk_buff *skb;
185         atomic_t command_id;
186         bool login_sent;
187         bool login_received;
188         u32 transmit_path;
189         struct mutex connection_lock;
190         int login_retries;
191         struct delayed_work login_work;
192         struct work_struct connected_work;
193         struct thunderbolt_ip_frame_header rx_hdr;
194         struct tbnet_ring rx_ring;
195         atomic_t frame_id;
196         struct tbnet_ring tx_ring;
197 };
198
199 /* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
200 static const uuid_t tbnet_dir_uuid =
201         UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
202                   0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
203
204 /* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
205 static const uuid_t tbnet_svc_uuid =
206         UUID_INIT(0x798f589e, 0x3616, 0x8a47,
207                   0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);
208
209 static struct tb_property_dir *tbnet_dir;
210
211 static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
212         u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
213         enum thunderbolt_ip_type type, size_t size, u32 command_id)
214 {
215         u32 length_sn;
216
217         /* Length does not include route_hi/lo and length_sn fields */
218         length_sn = (size - 3 * 4) / 4;
219         length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;
220
221         hdr->route_hi = upper_32_bits(route);
222         hdr->route_lo = lower_32_bits(route);
223         hdr->length_sn = length_sn;
224         uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
225         uuid_copy(&hdr->initiator_uuid, initiator_uuid);
226         uuid_copy(&hdr->target_uuid, target_uuid);
227         hdr->type = type;
228         hdr->command_id = command_id;
229 }
230
231 static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
232                                 u32 command_id)
233 {
234         struct thunderbolt_ip_login_response reply;
235         struct tb_xdomain *xd = net->xd;
236
237         memset(&reply, 0, sizeof(reply));
238         tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
239                           xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
240                           command_id);
241         memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
242         reply.receiver_mac_len = ETH_ALEN;
243
244         return tb_xdomain_response(xd, &reply, sizeof(reply),
245                                    TB_CFG_PKG_XDOMAIN_RESP);
246 }
247
248 static int tbnet_login_request(struct tbnet *net, u8 sequence)
249 {
250         struct thunderbolt_ip_login_response reply;
251         struct thunderbolt_ip_login request;
252         struct tb_xdomain *xd = net->xd;
253
254         memset(&request, 0, sizeof(request));
255         tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
256                           xd->remote_uuid, TBIP_LOGIN, sizeof(request),
257                           atomic_inc_return(&net->command_id));
258
259         request.proto_version = TBIP_LOGIN_PROTO_VERSION;
260         request.transmit_path = TBNET_LOCAL_PATH;
261
262         return tb_xdomain_request(xd, &request, sizeof(request),
263                                   TB_CFG_PKG_XDOMAIN_RESP, &reply,
264                                   sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
265                                   TBNET_LOGIN_TIMEOUT);
266 }
267
268 static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
269                                  u32 command_id)
270 {
271         struct thunderbolt_ip_status reply;
272         struct tb_xdomain *xd = net->xd;
273
274         memset(&reply, 0, sizeof(reply));
275         tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
276                           xd->remote_uuid, TBIP_STATUS, sizeof(reply),
277                           atomic_inc_return(&net->command_id));
278         return tb_xdomain_response(xd, &reply, sizeof(reply),
279                                    TB_CFG_PKG_XDOMAIN_RESP);
280 }
281
282 static int tbnet_logout_request(struct tbnet *net)
283 {
284         struct thunderbolt_ip_logout request;
285         struct thunderbolt_ip_status reply;
286         struct tb_xdomain *xd = net->xd;
287
288         memset(&request, 0, sizeof(request));
289         tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
290                           xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
291                           atomic_inc_return(&net->command_id));
292
293         return tb_xdomain_request(xd, &request, sizeof(request),
294                                   TB_CFG_PKG_XDOMAIN_RESP, &reply,
295                                   sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
296                                   TBNET_LOGOUT_TIMEOUT);
297 }
298
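/*
 * Reset the login state and schedule the first ThunderboltIP login attempt
 * after a one second delay. Further retries are driven from
 * tbnet_login_work().
 */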
299 static void start_login(struct tbnet *net)
300 {
301         mutex_lock(&net->connection_lock);
302         net->login_sent = false;
303         net->login_received = false;
304         mutex_unlock(&net->connection_lock);
305
306         queue_delayed_work(system_long_wq, &net->login_work,
307                            msecs_to_jiffies(1000));
308 }
309
310 static void stop_login(struct tbnet *net)
311 {
312         cancel_delayed_work_sync(&net->login_work);
313         cancel_work_sync(&net->connected_work);
314 }
315
316 static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
317 {
318         return tf->frame.size ? : TBNET_FRAME_SIZE;
319 }
320
321 static void tbnet_free_buffers(struct tbnet_ring *ring)
322 {
323         unsigned int i;
324
325         for (i = 0; i < TBNET_RING_SIZE; i++) {
326                 struct device *dma_dev = tb_ring_dma_device(ring->ring);
327                 struct tbnet_frame *tf = &ring->frames[i];
328                 enum dma_data_direction dir;
329                 unsigned int order;
330                 size_t size;
331
332                 if (!tf->page)
333                         continue;
334
335                 if (ring->ring->is_tx) {
336                         dir = DMA_TO_DEVICE;
337                         order = 0;
338                         size = tbnet_frame_size(tf);
339                 } else {
340                         dir = DMA_FROM_DEVICE;
341                         order = TBNET_RX_PAGE_ORDER;
342                         size = TBNET_RX_PAGE_SIZE;
343                 }
344
345                 if (tf->frame.buffer_phy)
346                         dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
347                                        dir);
348
349                 __free_pages(tf->page, order);
350                 tf->page = NULL;
351         }
352
353         ring->cons = 0;
354         ring->prod = 0;
355 }
356
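/*
 * Tear down an established connection: optionally send a ThunderboltIP
 * logout (retried on timeout), stop both DMA rings, free the frame buffers
 * and disable the XDomain DMA paths.
 */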
357 static void tbnet_tear_down(struct tbnet *net, bool send_logout)
358 {
359         netif_carrier_off(net->dev);
360         netif_stop_queue(net->dev);
361
362         stop_login(net);
363
364         mutex_lock(&net->connection_lock);
365
366         if (net->login_sent && net->login_received) {
367                 int retries = TBNET_LOGOUT_RETRIES;
368
369                 while (send_logout && retries-- > 0) {
370                         int ret = tbnet_logout_request(net);
371                         if (ret != -ETIMEDOUT)
372                                 break;
373                 }
374
375                 tb_ring_stop(net->rx_ring.ring);
376                 tb_ring_stop(net->tx_ring.ring);
377                 tbnet_free_buffers(&net->rx_ring);
378                 tbnet_free_buffers(&net->tx_ring);
379
380                 if (tb_xdomain_disable_paths(net->xd))
381                         netdev_warn(net->dev, "failed to disable DMA paths\n");
382         }
383
384         net->login_retries = 0;
385         net->login_sent = false;
386         net->login_received = false;
387
388         mutex_unlock(&net->connection_lock);
389 }
390
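/*
 * Handler for incoming ThunderboltIP control packets. Packets are ignored
 * unless the initiator/target UUIDs and the route match this XDomain
 * connection. Login requests are answered with a login response, and a
 * logout request tears the connection down.
 */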
391 static int tbnet_handle_packet(const void *buf, size_t size, void *data)
392 {
393         const struct thunderbolt_ip_login *pkg = buf;
394         struct tbnet *net = data;
395         u32 command_id;
396         int ret = 0;
397         u8 sequence;
398         u64 route;
399
400         /* Make sure the packet is for us */
401         if (size < sizeof(struct thunderbolt_ip_header))
402                 return 0;
403         if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
404                 return 0;
405         if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
406                 return 0;
407
408         route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
409         route &= ~BIT_ULL(63);
410         if (route != net->xd->route)
411                 return 0;
412
413         sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
414         sequence >>= TBIP_HDR_SN_SHIFT;
415         command_id = pkg->hdr.command_id;
416
417         switch (pkg->hdr.type) {
418         case TBIP_LOGIN:
419                 if (!netif_running(net->dev))
420                         break;
421
422                 ret = tbnet_login_response(net, route, sequence,
423                                            pkg->hdr.command_id);
424                 if (!ret) {
425                         mutex_lock(&net->connection_lock);
426                         net->login_received = true;
427                         net->transmit_path = pkg->transmit_path;
428
429                         /* If we reached the maximum number of retries
430                          * or the connection was previously logged out,
431                          * schedule another round of login retries
432                          */
433                         if (net->login_retries >= TBNET_LOGIN_RETRIES ||
434                             !net->login_sent) {
435                                 net->login_retries = 0;
436                                 queue_delayed_work(system_long_wq,
437                                                    &net->login_work, 0);
438                         }
439                         mutex_unlock(&net->connection_lock);
440
441                         queue_work(system_long_wq, &net->connected_work);
442                 }
443                 break;
444
445         case TBIP_LOGOUT:
446                 ret = tbnet_logout_response(net, route, sequence, command_id);
447                 if (!ret)
448                         tbnet_tear_down(net, false);
449                 break;
450
451         default:
452                 return 0;
453         }
454
455         if (ret)
456                 netdev_warn(net->dev, "failed to send ThunderboltIP response\n");
457
458         return 1;
459 }
460
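/*
 * prod and cons are free-running indices; their difference is the number of
 * available buffers. Since TBNET_RING_SIZE is a power of two, the indices
 * are masked with (TBNET_RING_SIZE - 1) when accessing the frames array.
 */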
461 static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
462 {
463         return ring->prod - ring->cons;
464 }
465
466 static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
467 {
468         struct tbnet_ring *ring = &net->rx_ring;
469         int ret;
470
471         while (nbuffers--) {
472                 struct device *dma_dev = tb_ring_dma_device(ring->ring);
473                 unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
474                 struct tbnet_frame *tf = &ring->frames[index];
475                 dma_addr_t dma_addr;
476
477                 if (tf->page)
478                         break;
479
480                 /* Allocate a page (order > 0) so that it can hold the
481                  * maximum ThunderboltIP frame (4kB) and the additional
482                  * room for the SKB shared info required by build_skb().
483                  */
484                 tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
485                 if (!tf->page) {
486                         ret = -ENOMEM;
487                         goto err_free;
488                 }
489
490                 dma_addr = dma_map_page(dma_dev, tf->page, 0,
491                                         TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
492                 if (dma_mapping_error(dma_dev, dma_addr)) {
493                         ret = -ENOMEM;
494                         goto err_free;
495                 }
496
497                 tf->frame.buffer_phy = dma_addr;
498                 tf->dev = net->dev;
499
500                 tb_ring_rx(ring->ring, &tf->frame);
501
502                 ring->prod++;
503         }
504
505         return 0;
506
507 err_free:
508         tbnet_free_buffers(ring);
509         return ret;
510 }
511
512 static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
513 {
514         struct tbnet_ring *ring = &net->tx_ring;
515         struct tbnet_frame *tf;
516         unsigned int index;
517
518         if (!tbnet_available_buffers(ring))
519                 return NULL;
520
521         index = ring->cons++ & (TBNET_RING_SIZE - 1);
522
523         tf = &ring->frames[index];
524         tf->frame.size = 0;
525         tf->frame.buffer_phy = 0;
526
527         return tf;
528 }
529
530 static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
531                               bool canceled)
532 {
533         struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
534         struct device *dma_dev = tb_ring_dma_device(ring);
535         struct tbnet *net = netdev_priv(tf->dev);
536
537         dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf),
538                        DMA_TO_DEVICE);
539
540         /* Return buffer to the ring */
541         net->tx_ring.prod++;
542
543         if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
544                 netif_wake_queue(net->dev);
545 }
546
547 static int tbnet_alloc_tx_buffers(struct tbnet *net)
548 {
549         struct tbnet_ring *ring = &net->tx_ring;
550         unsigned int i;
551
552         for (i = 0; i < TBNET_RING_SIZE; i++) {
553                 struct tbnet_frame *tf = &ring->frames[i];
554
555                 tf->page = alloc_page(GFP_KERNEL);
556                 if (!tf->page) {
557                         tbnet_free_buffers(ring);
558                         return -ENOMEM;
559                 }
560
561                 tf->dev = net->dev;
562                 tf->frame.callback = tbnet_tx_callback;
563                 tf->frame.sof = TBIP_PDF_FRAME_START;
564                 tf->frame.eof = TBIP_PDF_FRAME_END;
565         }
566
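        /*
         * Start with prod - cons == TBNET_RING_SIZE - 1 buffers available
         * for Tx. tbnet_get_tx_buffer() advances cons when a buffer is
         * taken and tbnet_tx_callback() advances prod once the frame has
         * been transmitted.
         */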
567         ring->cons = 0;
568         ring->prod = TBNET_RING_SIZE - 1;
569
570         return 0;
571 }
572
573 static void tbnet_connected_work(struct work_struct *work)
574 {
575         struct tbnet *net = container_of(work, typeof(*net), connected_work);
576         bool connected;
577         int ret;
578
579         if (netif_carrier_ok(net->dev))
580                 return;
581
582         mutex_lock(&net->connection_lock);
583         connected = net->login_sent && net->login_received;
584         mutex_unlock(&net->connection_lock);
585
586         if (!connected)
587                 return;
588
589         /* Both logins successful so enable the high-speed DMA paths and
590          * start the network device queue.
591          */
592         ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
593                                       net->rx_ring.ring->hop,
594                                       net->transmit_path,
595                                       net->tx_ring.ring->hop);
596         if (ret) {
597                 netdev_err(net->dev, "failed to enable DMA paths\n");
598                 return;
599         }
600
601         tb_ring_start(net->tx_ring.ring);
602         tb_ring_start(net->rx_ring.ring);
603
604         ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
605         if (ret)
606                 goto err_stop_rings;
607
608         ret = tbnet_alloc_tx_buffers(net);
609         if (ret)
610                 goto err_free_rx_buffers;
611
612         netif_carrier_on(net->dev);
613         netif_start_queue(net->dev);
614         return;
615
616 err_free_rx_buffers:
617         tbnet_free_buffers(&net->rx_ring);
618 err_stop_rings:
619         tb_ring_stop(net->rx_ring.ring);
620         tb_ring_stop(net->tx_ring.ring);
621 }
622
623 static void tbnet_login_work(struct work_struct *work)
624 {
625         struct tbnet *net = container_of(work, typeof(*net), login_work.work);
626         unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
627         int ret;
628
629         if (netif_carrier_ok(net->dev))
630                 return;
631
632         ret = tbnet_login_request(net, net->login_retries % 4);
633         if (ret) {
634                 if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
635                         queue_delayed_work(system_long_wq, &net->login_work,
636                                            delay);
637                 } else {
638                         netdev_info(net->dev, "ThunderboltIP login timed out\n");
639                 }
640         } else {
641                 net->login_retries = 0;
642
643                 mutex_lock(&net->connection_lock);
644                 net->login_sent = true;
645                 mutex_unlock(&net->connection_lock);
646
647                 queue_work(system_long_wq, &net->connected_work);
648         }
649 }
650
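/*
 * Validate a received frame: check the hardware flags for CRC/overrun
 * errors and the frame size, and, when in the middle of a multi-frame
 * packet, check that the frame count, index and ID are consistent with the
 * first fragment and that the assembled packet does not exceed
 * TBNET_MAX_MTU.
 */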
651 static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
652                               const struct thunderbolt_ip_frame_header *hdr)
653 {
654         u32 frame_id, frame_count, frame_size, frame_index;
655         unsigned int size;
656
657         if (tf->frame.flags & RING_DESC_CRC_ERROR) {
658                 net->stats.rx_crc_errors++;
659                 return false;
660         } else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
661                 net->stats.rx_over_errors++;
662                 return false;
663         }
664
665         /* Should be larger than just the header, i.e. the frame contains data */
666         size = tbnet_frame_size(tf);
667         if (size <= sizeof(*hdr)) {
668                 net->stats.rx_length_errors++;
669                 return false;
670         }
671
672         frame_count = le32_to_cpu(hdr->frame_count);
673         frame_size = le32_to_cpu(hdr->frame_size);
674         frame_index = le16_to_cpu(hdr->frame_index);
675         frame_id = le16_to_cpu(hdr->frame_id);
676
677         if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
678                 net->stats.rx_length_errors++;
679                 return false;
680         }
681
682         /* In case we're in the middle of a packet, validate the frame
683          * header based on the first fragment of the packet.
684          */
685         if (net->skb && net->rx_hdr.frame_count) {
686                 /* Check that the frame count matches the first fragment */
687                 if (frame_count != net->rx_hdr.frame_count) {
688                         net->stats.rx_length_errors++;
689                         return false;
690                 }
691
692                 /* Check that the frame index is incremented correctly
693                  * and the frame id matches.
694                  */
695                 if (frame_index != net->rx_hdr.frame_index + 1 ||
696                     frame_id != net->rx_hdr.frame_id) {
697                         net->stats.rx_missed_errors++;
698                         return false;
699                 }
700
701                 if (net->skb->len + frame_size > TBNET_MAX_MTU) {
702                         net->stats.rx_length_errors++;
703                         return false;
704                 }
705
706                 return true;
707         }
708
709         /* Start of packet, validate the frame header */
710         if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
711                 net->stats.rx_length_errors++;
712                 return false;
713         }
714         if (frame_index != 0) {
715                 net->stats.rx_missed_errors++;
716                 return false;
717         }
718
719         return true;
720 }
721
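/*
 * NAPI poll routine for the Rx ring. The first fragment of a packet becomes
 * an skb via build_skb(); subsequent fragments are attached as page
 * fragments with skb_add_rx_frag(). The skb is passed to napi_gro_receive()
 * once the last fragment (frame_index == frame_count - 1) has arrived.
 */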
722 static int tbnet_poll(struct napi_struct *napi, int budget)
723 {
724         struct tbnet *net = container_of(napi, struct tbnet, napi);
725         unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
726         struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
727         unsigned int rx_packets = 0;
728
729         while (rx_packets < budget) {
730                 const struct thunderbolt_ip_frame_header *hdr;
731                 unsigned int hdr_size = sizeof(*hdr);
732                 struct sk_buff *skb = NULL;
733                 struct ring_frame *frame;
734                 struct tbnet_frame *tf;
735                 struct page *page;
736                 bool last = true;
737                 u32 frame_size;
738
739                 /* Return some buffers to the hardware; one at a time is
740                  * too slow, so allocate MAX_SKB_FRAGS buffers at the
741                  * same time.
742                  */
743                 if (cleaned_count >= MAX_SKB_FRAGS) {
744                         tbnet_alloc_rx_buffers(net, cleaned_count);
745                         cleaned_count = 0;
746                 }
747
748                 frame = tb_ring_poll(net->rx_ring.ring);
749                 if (!frame)
750                         break;
751
752                 dma_unmap_page(dma_dev, frame->buffer_phy,
753                                TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
754
755                 tf = container_of(frame, typeof(*tf), frame);
756
757                 page = tf->page;
758                 tf->page = NULL;
759                 net->rx_ring.cons++;
760                 cleaned_count++;
761
762                 hdr = page_address(page);
763                 if (!tbnet_check_frame(net, tf, hdr)) {
764                         __free_pages(page, TBNET_RX_PAGE_ORDER);
765                         dev_kfree_skb_any(net->skb);
766                         net->skb = NULL;
767                         continue;
768                 }
769
770                 frame_size = le32_to_cpu(hdr->frame_size);
771
772                 skb = net->skb;
773                 if (!skb) {
774                         skb = build_skb(page_address(page),
775                                         TBNET_RX_PAGE_SIZE);
776                         if (!skb) {
777                                 __free_pages(page, TBNET_RX_PAGE_ORDER);
778                                 net->stats.rx_errors++;
779                                 break;
780                         }
781
782                         skb_reserve(skb, hdr_size);
783                         skb_put(skb, frame_size);
784
785                         net->skb = skb;
786                 } else {
787                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
788                                         page, hdr_size, frame_size,
789                                         TBNET_RX_PAGE_SIZE - hdr_size);
790                 }
791
792                 net->rx_hdr.frame_size = frame_size;
793                 net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
794                 net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
795                 net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
796                 last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;
797
798                 rx_packets++;
799                 net->stats.rx_bytes += frame_size;
800
801                 if (last) {
802                         skb->protocol = eth_type_trans(skb, net->dev);
803                         napi_gro_receive(&net->napi, skb);
804                         net->skb = NULL;
805                 }
806         }
807
808         net->stats.rx_packets += rx_packets;
809
810         if (cleaned_count)
811                 tbnet_alloc_rx_buffers(net, cleaned_count);
812
813         if (rx_packets >= budget)
814                 return budget;
815
816         napi_complete_done(napi, rx_packets);
817         /* Re-enable the ring interrupt */
818         tb_ring_poll_complete(net->rx_ring.ring);
819
820         return rx_packets;
821 }
822
823 static void tbnet_start_poll(void *data)
824 {
825         struct tbnet *net = data;
826
827         napi_schedule(&net->napi);
828 }
829
830 static int tbnet_open(struct net_device *dev)
831 {
832         struct tbnet *net = netdev_priv(dev);
833         struct tb_xdomain *xd = net->xd;
834         u16 sof_mask, eof_mask;
835         struct tb_ring *ring;
836
837         netif_carrier_off(dev);
838
839         ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
840                                 RING_FLAG_FRAME);
841         if (!ring) {
842                 netdev_err(dev, "failed to allocate Tx ring\n");
843                 return -ENOMEM;
844         }
845         net->tx_ring.ring = ring;
846
847         sof_mask = BIT(TBIP_PDF_FRAME_START);
848         eof_mask = BIT(TBIP_PDF_FRAME_END);
849
850         ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
851                                 RING_FLAG_FRAME | RING_FLAG_E2E, sof_mask,
852                                 eof_mask, tbnet_start_poll, net);
853         if (!ring) {
854                 netdev_err(dev, "failed to allocate Rx ring\n");
855                 tb_ring_free(net->tx_ring.ring);
856                 net->tx_ring.ring = NULL;
857                 return -ENOMEM;
858         }
859         net->rx_ring.ring = ring;
860
861         napi_enable(&net->napi);
862         start_login(net);
863
864         return 0;
865 }
866
867 static int tbnet_stop(struct net_device *dev)
868 {
869         struct tbnet *net = netdev_priv(dev);
870
871         napi_disable(&net->napi);
872
873         tbnet_tear_down(net, true);
874
875         tb_ring_free(net->rx_ring.ring);
876         net->rx_ring.ring = NULL;
877         tb_ring_free(net->tx_ring.ring);
878         net->tx_ring.ring = NULL;
879
880         return 0;
881 }
882
883 static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf)
884 {
885         dma_addr_t dma_addr;
886
887         dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf),
888                                 DMA_TO_DEVICE);
889         if (dma_mapping_error(dma_dev, dma_addr))
890                 return false;
891
892         tf->frame.buffer_phy = dma_addr;
893         return true;
894 }
895
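/*
 * For CHECKSUM_PARTIAL skbs the checksum is calculated in software here:
 * the data has already been copied into the Tx frames, so a running
 * checksum is accumulated over every frame and folded into the TCP/UDP
 * checksum field located in the first frame. Finally all frames are DMA
 * mapped.
 */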
896 static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
897         struct tbnet_frame **frames, u32 frame_count)
898 {
899         struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
900         struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
901         __wsum wsum = htonl(skb->len - skb_transport_offset(skb));
902         unsigned int i, len, offset = skb_transport_offset(skb);
903         __be16 protocol = skb->protocol;
904         void *data = skb->data;
905         void *dest = hdr + 1;
906         __sum16 *tucso;
907
908         if (skb->ip_summed != CHECKSUM_PARTIAL) {
909                 /* No need to calculate a checksum, so just update the
910                  * total frame count and map the frames for DMA.
911                  */
912                 for (i = 0; i < frame_count; i++) {
913                         hdr = page_address(frames[i]->page);
914                         hdr->frame_count = cpu_to_le32(frame_count);
915                         if (!tbnet_xmit_map(dma_dev, frames[i]))
916                                 goto err_unmap;
917                 }
918
919                 return true;
920         }
921
922         if (protocol == htons(ETH_P_8021Q)) {
923                 struct vlan_hdr *vhdr, vh;
924
925                 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
926                 if (!vhdr)
927                         return false;
928
929                 protocol = vhdr->h_vlan_encapsulated_proto;
930         }
931
932         /* Data points to the beginning of the packet.
933          * Find the absolute place of the checksum fields in the packet:
934          * ipcso is used to update the IP checksum,
935          * tucso is used to update the TCP/UDP checksum.
936          */
937         if (protocol == htons(ETH_P_IP)) {
938                 __sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);
939
940                 *ipcso = 0;
941                 *ipcso = ip_fast_csum(dest + skb_network_offset(skb),
942                                       ip_hdr(skb)->ihl);
943
944                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
945                         tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
946                 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
947                         tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
948                 else
949                         return false;
950
951                 *tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
952                                             ip_hdr(skb)->daddr, 0,
953                                             ip_hdr(skb)->protocol, 0);
954         } else if (skb_is_gso_v6(skb)) {
955                 tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
956                 *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
957                                           &ipv6_hdr(skb)->daddr, 0,
958                                           IPPROTO_TCP, 0);
959                 return false;
960         } else if (protocol == htons(ETH_P_IPV6)) {
961                 tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
962                 *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
963                                           &ipv6_hdr(skb)->daddr, 0,
964                                           ipv6_hdr(skb)->nexthdr, 0);
965         } else {
966                 return false;
967         }
968
969         /* The first frame contains the headers, the rest of the frames
970          * carry data. Calculate the checksum over each frame.
971          */
972         for (i = 0; i < frame_count; i++) {
973                 hdr = page_address(frames[i]->page);
974                 dest = (void *)(hdr + 1) + offset;
975                 len = le32_to_cpu(hdr->frame_size) - offset;
976                 wsum = csum_partial(dest, len, wsum);
977                 hdr->frame_count = cpu_to_le32(frame_count);
978
979                 offset = 0;
980         }
981
982         *tucso = csum_fold(wsum);
983
984         /* Checksum is finally calculated and we don't touch the memory
985          * anymore, so DMA map the frames now.
986          */
987         for (i = 0; i < frame_count; i++) {
988                 if (!tbnet_xmit_map(dma_dev, frames[i]))
989                         goto err_unmap;
990         }
991
992         return true;
993
994 err_unmap:
995         while (i--)
996                 dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy,
997                                tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
998
999         return false;
1000 }
1001
1002 static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
1003                              unsigned int *len)
1004 {
1005         const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
1006
1007         *len = skb_frag_size(frag);
1008         return kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
1009 }
1010
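/*
 * Transmit path: copy the skb (linear data and page fragments) into
 * TBNET_MAX_PAYLOAD_SIZE sized Thunderbolt frames, each prefixed with a
 * struct thunderbolt_ip_frame_header, then checksum and DMA map the frames
 * and queue them to the Tx ring.
 */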
1011 static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
1012                                     struct net_device *dev)
1013 {
1014         struct tbnet *net = netdev_priv(dev);
1015         struct tbnet_frame *frames[MAX_SKB_FRAGS];
1016         u16 frame_id = atomic_read(&net->frame_id);
1017         struct thunderbolt_ip_frame_header *hdr;
1018         unsigned int len = skb_headlen(skb);
1019         unsigned int data_len = skb->len;
1020         unsigned int nframes, i;
1021         unsigned int frag = 0;
1022         void *src = skb->data;
1023         u32 frame_index = 0;
1024         bool unmap = false;
1025         void *dest;
1026
1027         nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
1028         if (tbnet_available_buffers(&net->tx_ring) < nframes) {
1029                 netif_stop_queue(net->dev);
1030                 return NETDEV_TX_BUSY;
1031         }
1032
1033         frames[frame_index] = tbnet_get_tx_buffer(net);
1034         if (!frames[frame_index])
1035                 goto err_drop;
1036
1037         hdr = page_address(frames[frame_index]->page);
1038         dest = hdr + 1;
1039
1040         /* If the overall packet is bigger than the frame data size */
1041         while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
1042                 unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;
1043
1044                 hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
1045                 hdr->frame_index = cpu_to_le16(frame_index);
1046                 hdr->frame_id = cpu_to_le16(frame_id);
1047
1048                 do {
1049                         if (len > size_left) {
1050                                 /* Copy a full frame worth of data into
1051                                  * the Tx buffer, then break and move on
1052                                  * to the next frame
1053                                  */
1054                                 memcpy(dest, src, size_left);
1055                                 len -= size_left;
1056                                 dest += size_left;
1057                                 src += size_left;
1058                                 break;
1059                         }
1060
1061                         memcpy(dest, src, len);
1062                         size_left -= len;
1063                         dest += len;
1064
1065                         if (unmap) {
1066                                 kunmap_atomic(src);
1067                                 unmap = false;
1068                         }
1069
1070                         /* Ensure all fragments have been processed */
1071                         if (frag < skb_shinfo(skb)->nr_frags) {
1072                                 /* Map and then unmap quickly */
1073                                 src = tbnet_kmap_frag(skb, frag++, &len);
1074                                 unmap = true;
1075                         } else if (unlikely(size_left > 0)) {
1076                                 goto err_drop;
1077                         }
1078                 } while (size_left > 0);
1079
1080                 data_len -= TBNET_MAX_PAYLOAD_SIZE;
1081                 frame_index++;
1082
1083                 frames[frame_index] = tbnet_get_tx_buffer(net);
1084                 if (!frames[frame_index])
1085                         goto err_drop;
1086
1087                 hdr = page_address(frames[frame_index]->page);
1088                 dest = hdr + 1;
1089         }
1090
1091         hdr->frame_size = cpu_to_le32(data_len);
1092         hdr->frame_index = cpu_to_le16(frame_index);
1093         hdr->frame_id = cpu_to_le16(frame_id);
1094
1095         frames[frame_index]->frame.size = data_len + sizeof(*hdr);
1096
1097         /* In case the remaining data_len is smaller than a frame */
1098         while (len < data_len) {
1099                 memcpy(dest, src, len);
1100                 data_len -= len;
1101                 dest += len;
1102
1103                 if (unmap) {
1104                         kunmap_atomic(src);
1105                         unmap = false;
1106                 }
1107
1108                 if (frag < skb_shinfo(skb)->nr_frags) {
1109                         src = tbnet_kmap_frag(skb, frag++, &len);
1110                         unmap = true;
1111                 } else if (unlikely(data_len > 0)) {
1112                         goto err_drop;
1113                 }
1114         }
1115
1116         memcpy(dest, src, data_len);
1117
1118         if (unmap)
1119                 kunmap_atomic(src);
1120
1121         if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
1122                 goto err_drop;
1123
1124         for (i = 0; i < frame_index + 1; i++)
1125                 tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);
1126
1127         if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
1128                 atomic_inc(&net->frame_id);
1129
1130         net->stats.tx_packets++;
1131         net->stats.tx_bytes += skb->len;
1132
1133         dev_consume_skb_any(skb);
1134
1135         return NETDEV_TX_OK;
1136
1137 err_drop:
1138         /* We can re-use the buffers */
1139         net->tx_ring.cons -= frame_index;
1140
1141         dev_kfree_skb_any(skb);
1142         net->stats.tx_errors++;
1143
1144         return NETDEV_TX_OK;
1145 }
1146
1147 static void tbnet_get_stats64(struct net_device *dev,
1148                               struct rtnl_link_stats64 *stats)
1149 {
1150         struct tbnet *net = netdev_priv(dev);
1151
1152         stats->tx_packets = net->stats.tx_packets;
1153         stats->rx_packets = net->stats.rx_packets;
1154         stats->tx_bytes = net->stats.tx_bytes;
1155         stats->rx_bytes = net->stats.rx_bytes;
1156         stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
1157                 net->stats.rx_over_errors + net->stats.rx_crc_errors +
1158                 net->stats.rx_missed_errors;
1159         stats->tx_errors = net->stats.tx_errors;
1160         stats->rx_length_errors = net->stats.rx_length_errors;
1161         stats->rx_over_errors = net->stats.rx_over_errors;
1162         stats->rx_crc_errors = net->stats.rx_crc_errors;
1163         stats->rx_missed_errors = net->stats.rx_missed_errors;
1164 }
1165
1166 static const struct net_device_ops tbnet_netdev_ops = {
1167         .ndo_open = tbnet_open,
1168         .ndo_stop = tbnet_stop,
1169         .ndo_start_xmit = tbnet_start_xmit,
1170         .ndo_get_stats64 = tbnet_get_stats64,
1171 };
1172
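/*
 * Generate a locally administered unicast MAC address: the first byte
 * encodes the physical Thunderbolt port, the remaining bytes are derived
 * from a jhash of the local XDomain UUID.
 */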
1173 static void tbnet_generate_mac(struct net_device *dev)
1174 {
1175         const struct tbnet *net = netdev_priv(dev);
1176         const struct tb_xdomain *xd = net->xd;
1177         u8 phy_port;
1178         u32 hash;
1179
1180         phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));
1181
1182         /* Unicast and locally administered MAC */
1183         dev->dev_addr[0] = phy_port << 4 | 0x02;
1184         hash = jhash2((u32 *)xd->local_uuid, 4, 0);
1185         memcpy(dev->dev_addr + 1, &hash, sizeof(hash));
1186         hash = jhash2((u32 *)xd->local_uuid, 4, hash);
1187         dev->dev_addr[5] = hash & 0xff;
1188 }
1189
1190 static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
1191 {
1192         struct tb_xdomain *xd = tb_service_parent(svc);
1193         struct net_device *dev;
1194         struct tbnet *net;
1195         int ret;
1196
1197         dev = alloc_etherdev(sizeof(*net));
1198         if (!dev)
1199                 return -ENOMEM;
1200
1201         SET_NETDEV_DEV(dev, &svc->dev);
1202
1203         net = netdev_priv(dev);
1204         INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
1205         INIT_WORK(&net->connected_work, tbnet_connected_work);
1206         mutex_init(&net->connection_lock);
1207         atomic_set(&net->command_id, 0);
1208         atomic_set(&net->frame_id, 0);
1209         net->svc = svc;
1210         net->dev = dev;
1211         net->xd = xd;
1212
1213         tbnet_generate_mac(dev);
1214
1215         strcpy(dev->name, "thunderbolt%d");
1216         dev->netdev_ops = &tbnet_netdev_ops;
1217
1218         /* ThunderboltIP takes advantage of TSO packets but instead of
1219          * segmenting them we just split the packet into Thunderbolt
1220          * frames (maximum payload size of each frame is 4084 bytes) and
1221          * calculate checksum over the whole packet here.
1222          *
1223          * The receiving side does the opposite if the host OS supports
1224          * LRO, otherwise it needs to split the large packet into
1225          * smaller MTU-sized packets.
1226          *
1227          * In order to receive large packets from the networking stack,
1228          * we need to announce support for most of the offloading
1229          * features here.
1230          */
1231         dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
1232                            NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1233         dev->features = dev->hw_features | NETIF_F_HIGHDMA;
1234         dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);
1235
1236         netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);
1237
1238         /* MTU range: 68 - 65522 */
1239         dev->min_mtu = ETH_MIN_MTU;
1240         dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;
1241
1242         net->handler.uuid = &tbnet_svc_uuid;
1243         net->handler.callback = tbnet_handle_packet;
1244         net->handler.data = net;
1245         tb_register_protocol_handler(&net->handler);
1246
1247         tb_service_set_drvdata(svc, net);
1248
1249         ret = register_netdev(dev);
1250         if (ret) {
1251                 tb_unregister_protocol_handler(&net->handler);
1252                 free_netdev(dev);
1253                 return ret;
1254         }
1255
1256         return 0;
1257 }
1258
1259 static void tbnet_remove(struct tb_service *svc)
1260 {
1261         struct tbnet *net = tb_service_get_drvdata(svc);
1262
1263         unregister_netdev(net->dev);
1264         tb_unregister_protocol_handler(&net->handler);
1265         free_netdev(net->dev);
1266 }
1267
1268 static void tbnet_shutdown(struct tb_service *svc)
1269 {
1270         tbnet_tear_down(tb_service_get_drvdata(svc), true);
1271 }
1272
1273 static int __maybe_unused tbnet_suspend(struct device *dev)
1274 {
1275         struct tb_service *svc = tb_to_service(dev);
1276         struct tbnet *net = tb_service_get_drvdata(svc);
1277
1278         stop_login(net);
1279         if (netif_running(net->dev)) {
1280                 netif_device_detach(net->dev);
1281                 tb_ring_stop(net->rx_ring.ring);
1282                 tb_ring_stop(net->tx_ring.ring);
1283                 tbnet_free_buffers(&net->rx_ring);
1284                 tbnet_free_buffers(&net->tx_ring);
1285         }
1286
1287         return 0;
1288 }
1289
1290 static int __maybe_unused tbnet_resume(struct device *dev)
1291 {
1292         struct tb_service *svc = tb_to_service(dev);
1293         struct tbnet *net = tb_service_get_drvdata(svc);
1294
1295         netif_carrier_off(net->dev);
1296         if (netif_running(net->dev)) {
1297                 netif_device_attach(net->dev);
1298                 start_login(net);
1299         }
1300
1301         return 0;
1302 }
1303
1304 static const struct dev_pm_ops tbnet_pm_ops = {
1305         SET_SYSTEM_SLEEP_PM_OPS(tbnet_suspend, tbnet_resume)
1306 };
1307
1308 static const struct tb_service_id tbnet_ids[] = {
1309         { TB_SERVICE("network", 1) },
1310         { },
1311 };
1312 MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);
1313
1314 static struct tb_service_driver tbnet_driver = {
1315         .driver = {
1316                 .owner = THIS_MODULE,
1317                 .name = "thunderbolt-net",
1318                 .pm = &tbnet_pm_ops,
1319         },
1320         .probe = tbnet_probe,
1321         .remove = tbnet_remove,
1322         .shutdown = tbnet_shutdown,
1323         .id_table = tbnet_ids,
1324 };
1325
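/*
 * Module init: create and register the ThunderboltIP XDomain property
 * directory (protocol ID/version plus the %TBNET_MATCH_FRAGS_ID capability
 * in "prtcstns") and register the service driver.
 */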
1326 static int __init tbnet_init(void)
1327 {
1328         int ret;
1329
1330         tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
1331         if (!tbnet_dir)
1332                 return -ENOMEM;
1333
1334         tb_property_add_immediate(tbnet_dir, "prtcid", 1);
1335         tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
1336         tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
1337         tb_property_add_immediate(tbnet_dir, "prtcstns",
1338                                   TBNET_MATCH_FRAGS_ID);
1339
1340         ret = tb_register_property_dir("network", tbnet_dir);
1341         if (ret) {
1342                 tb_property_free_dir(tbnet_dir);
1343                 return ret;
1344         }
1345
1346         return tb_register_service_driver(&tbnet_driver);
1347 }
1348 module_init(tbnet_init);
1349
1350 static void __exit tbnet_exit(void)
1351 {
1352         tb_unregister_service_driver(&tbnet_driver);
1353         tb_unregister_property_dir("network", tbnet_dir);
1354         tb_property_free_dir(tbnet_dir);
1355 }
1356 module_exit(tbnet_exit);
1357
1358 MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
1359 MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
1360 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
1361 MODULE_DESCRIPTION("Thunderbolt network driver");
1362 MODULE_LICENSE("GPL v2");