// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop processing plug
 *                  events and exit if this is not set (it needs to
 *                  acquire the lock one more time). Used to drain the
 *                  workqueue after the config channel has been paused.
 */
struct tb_cm {
        struct list_head tunnel_list;
        bool hotplug_active;
};

struct tb_hotplug_event {
        struct work_struct work;
        struct tb *tb;
        u64 route;
        u8 port;
        bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

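/*
 * tb_queue_hotplug() - defer a plug/unplug event to the domain workqueue
 *
 * Allocates a struct tb_hotplug_event and schedules tb_handle_hotplug()
 * on tb->wq. If the allocation fails the event is silently dropped.
 */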
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
        struct tb_hotplug_event *ev;

        ev = kmalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return;

        ev->tb = tb;
        ev->route = route;
        ev->port = port;
        ev->unplug = unplug;
        INIT_WORK(&ev->work, tb_handle_hotplug);
        queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

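/*
 * tb_discover_tunnels() - pick up tunnels created by the boot firmware
 *
 * Walks every DP IN and PCIe downstream adapter below @sw, discovers
 * any tunnel the boot firmware has already set up and adds it to the
 * connection manager's tunnel list. Switches that are part of a
 * discovered PCIe tunnel are marked as boot devices.
 */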
static void tb_discover_tunnels(struct tb_switch *sw)
{
        struct tb *tb = sw->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                struct tb_tunnel *tunnel = NULL;

                switch (port->config.type) {
                case TB_TYPE_DP_HDMI_IN:
                        tunnel = tb_tunnel_discover_dp(tb, port);
                        break;

                case TB_TYPE_PCIE_DOWN:
                        tunnel = tb_tunnel_discover_pci(tb, port);
                        break;

                default:
                        break;
                }

                if (!tunnel)
                        continue;

                if (tb_tunnel_is_pci(tunnel)) {
                        struct tb_switch *parent = tunnel->dst_port->sw;

                        while (parent != tunnel->src_port->sw) {
                                parent->boot = true;
                                parent = tb_switch_parent(parent);
                        }
                }

                list_add_tail(&tunnel->list, &tcm->tunnel_list);
        }

        tb_switch_for_each_port(sw, port) {
                if (tb_port_has_remote(port))
                        tb_discover_tunnels(port->remote->sw);
        }
}

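/*
 * tb_scan_xdomain() - handle a port that leads to another domain
 *
 * If no XDomain has been added yet for the route behind @port, allocate
 * one and register it so that XDomain discovery can start.
 */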
static void tb_scan_xdomain(struct tb_port *port)
{
        struct tb_switch *sw = port->sw;
        struct tb *tb = sw->tb;
        struct tb_xdomain *xd;
        u64 route;

        route = tb_downstream_route(port);
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                tb_xdomain_put(xd);
                return;
        }

        xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
                              NULL);
        if (xd) {
                tb_port_at(route, sw)->xdomain = xd;
                tb_xdomain_add(xd);
        }
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port)
                tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
        struct tb_cm *tcm = tb_priv(port->sw->tb);
        struct tb_port *upstream_port;
        struct tb_switch *sw;

        if (tb_is_upstream_port(port))
                return;

        if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
            !tb_dp_port_is_enabled(port)) {
                tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
                tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
                                 false);
                return;
        }

        if (port->config.type != TB_TYPE_PORT)
                return;
        if (port->dual_link_port && port->link_nr)
                return; /*
                         * Downstream switch is reachable through two ports.
                         * Only scan on the primary port (link_nr == 0).
                         */
        if (tb_wait_for_port(port, false) <= 0)
                return;
        if (port->remote) {
                tb_port_dbg(port, "port already has a remote\n");
                return;
        }
        sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
                             tb_downstream_route(port));
        if (IS_ERR(sw)) {
                /*
                 * If there is an error accessing the connected switch
                 * it may be connected to another domain. Also we allow
                 * the other domain to be connected to a max depth switch.
                 */
                if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
                        tb_scan_xdomain(port);
                return;
        }

        if (tb_switch_configure(sw)) {
                tb_switch_put(sw);
                return;
        }

        /*
         * If there was previously another domain connected remove it
         * first.
         */
        if (port->xdomain) {
                tb_xdomain_remove(port->xdomain);
                port->xdomain = NULL;
        }

        /*
         * Do not send uevents until we have discovered all existing
         * tunnels and know which switches were authorized already by
         * the boot firmware.
         */
        if (!tcm->hotplug_active)
                dev_set_uevent_suppress(&sw->dev, true);

        if (tb_switch_add(sw)) {
                tb_switch_put(sw);
                return;
        }

        /* Link the switches using both links if available */
        upstream_port = tb_upstream_port(sw);
        port->remote = upstream_port;
        upstream_port->remote = port;
        if (port->dual_link_port && upstream_port->dual_link_port) {
                port->dual_link_port->remote = upstream_port->dual_link_port;
                upstream_port->dual_link_port->remote = port->dual_link_port;
        }

        /* Enable lane bonding if supported */
        if (tb_switch_lane_bonding_enable(sw))
                tb_sw_warn(sw, "failed to enable lane bonding\n");

        tb_scan_switch(sw);
}

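/*
 * tb_free_tunnel() - deactivate and free the first matching tunnel
 *
 * Finds a tunnel of @type whose source or destination adapter matches
 * the given ports, deactivates it, removes it from the tunnel list and
 * frees it. Returns %0 on success or %-ENODEV if no such tunnel exists.
 */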
static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
                          struct tb_port *src_port, struct tb_port *dst_port)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;

        list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
                if (tunnel->type == type &&
                    ((src_port && src_port == tunnel->src_port) ||
                     (dst_port && dst_port == tunnel->dst_port))) {
                        tb_tunnel_deactivate(tunnel);
                        list_del(&tunnel->list);
                        tb_tunnel_free(tunnel);
                        return 0;
                }
        }

        return -ENODEV;
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                if (tb_tunnel_is_invalid(tunnel)) {
                        tb_tunnel_deactivate(tunnel);
                        list_del(&tunnel->list);
                        tb_tunnel_free(tunnel);
                }
        }
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port))
                        continue;

                if (port->remote->sw->is_unplugged) {
                        tb_switch_lane_bonding_disable(port->remote->sw);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                } else {
                        tb_free_unplugged_children(port->remote->sw);
                }
        }
}

/**
 * tb_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
static struct tb_port *tb_find_port(struct tb_switch *sw,
                                    enum tb_port_type type)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (port->config.type == type)
                        return port;
        }

        return NULL;
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
                                           enum tb_port_type type)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->config.type != type)
                        continue;
                if (port->cap_adap)
                        continue;
                if (tb_port_is_enabled(port))
                        continue;
                return port;
        }
        return NULL;
}

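/*
 * tb_find_pcie_down() - find the PCIe downstream adapter to use for @port
 *
 * On the root switch the Thunderbolt port to PCIe downstream adapter
 * mapping is hard-coded per controller; everywhere else the first
 * unused PCIe downstream adapter is picked.
 */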
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
                                         const struct tb_port *port)
{
        /*
         * To keep plugging devices consistently in the same PCIe
         * hierarchy, do mapping here for root switch downstream PCIe
         * ports.
         */
        if (!tb_route(sw)) {
                int phy_port = tb_phy_port_from_link(port->port);
                int index;

                /*
                 * Hard-coded Thunderbolt port to PCIe down port mapping
                 * per controller.
                 */
                if (tb_switch_is_cactus_ridge(sw))
                        index = !phy_port ? 6 : 7;
                else if (tb_switch_is_falcon_ridge(sw))
                        index = !phy_port ? 6 : 8;
                else
                        goto out;

                /* Validate the hard-coding */
                if (WARN_ON(index > sw->config.max_port_number))
                        goto out;
                if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
                        goto out;
                if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
                        goto out;

                return &sw->ports[index];
        }

out:
        return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

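/*
 * tb_tunnel_dp() - tunnel DisplayPort to the given DP OUT adapter
 *
 * Walks up the topology from @out looking for an unused DP IN adapter,
 * then allocates and activates a DP tunnel between the two.
 */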
static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw = out->sw;
        struct tb_tunnel *tunnel;
        struct tb_port *in;

        if (tb_port_is_enabled(out))
                return 0;

        do {
                sw = tb_to_switch(sw->dev.parent);
                if (!sw)
                        return 0;
                in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
        } while (!in);

        tunnel = tb_tunnel_alloc_dp(tb, in, out);
        if (!tunnel) {
                tb_port_dbg(out, "DP tunnel allocation failed\n");
                return -ENOMEM;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(out, "DP tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}

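/* Tear down the DP tunnel that ends at the given DP OUT adapter, if any */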
static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
{
        tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
}

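/*
 * tb_tunnel_pci() - tunnel PCIe towards the given switch
 *
 * Called when a switch is approved. Finds the PCIe upstream adapter on
 * @sw and a free PCIe downstream adapter on the parent switch, then
 * allocates and activates a PCIe tunnel between them.
 */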
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
        struct tb_port *up, *down, *port;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *parent_sw;
        struct tb_tunnel *tunnel;

        up = tb_find_port(sw, TB_TYPE_PCIE_UP);
        if (!up)
                return 0;

        /*
         * Look up available down port. Since we are chaining it should
         * be found right above this switch.
         */
        parent_sw = tb_to_switch(sw->dev.parent);
        port = tb_port_at(tb_route(sw), parent_sw);
        down = tb_find_pcie_down(parent_sw, port);
        if (!down)
                return 0;

        tunnel = tb_tunnel_alloc_pci(tb, up, down);
        if (!tunnel)
                return -ENOMEM;

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(up,
                             "PCIe tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}

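/*
 * tb_approve_xdomain_paths() - enable DMA paths towards an XDomain
 *
 * Sets up a DMA tunnel between the NHI port of the root switch and the
 * port leading to @xd, using the transmit/receive rings and paths
 * stored in @xd.
 */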
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *nhi_port, *dst_port;
        struct tb_tunnel *tunnel;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);
        nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);

        mutex_lock(&tb->lock);
        tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
                                     xd->transmit_path, xd->receive_ring,
                                     xd->receive_path);
        if (!tunnel) {
                mutex_unlock(&tb->lock);
                return -ENOMEM;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(nhi_port,
                             "DMA tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                mutex_unlock(&tb->lock);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        mutex_unlock(&tb->lock);
        return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_port *dst_port;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);

        /*
         * It is possible that the tunnel was already torn down (in
         * case of cable disconnect) so it is fine if we cannot find it
         * here anymore.
         */
        tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!xd->is_unplugged) {
                mutex_lock(&tb->lock);
                __tb_disconnect_xdomain_paths(tb, xd);
                mutex_unlock(&tb->lock);
        }
        return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
        struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
        struct tb *tb = ev->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw;
        struct tb_port *port;

        mutex_lock(&tb->lock);
        if (!tcm->hotplug_active)
                goto out; /* during init, suspend or shutdown */

        sw = tb_switch_find_by_route(tb, ev->route);
        if (!sw) {
                tb_warn(tb,
                        "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto out;
        }
        if (ev->port > sw->config.max_port_number) {
                tb_warn(tb,
                        "hotplug event from non existent port %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        port = &sw->ports[ev->port];
        if (tb_is_upstream_port(port)) {
                tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
                       ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        if (ev->unplug) {
                if (tb_port_has_remote(port)) {
                        tb_port_dbg(port, "switch unplugged\n");
                        tb_sw_set_unplugged(port->remote->sw);
                        tb_free_invalid_tunnels(tb);
                        tb_switch_lane_bonding_disable(port->remote->sw);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                } else if (port->xdomain) {
                        struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

                        tb_port_dbg(port, "xdomain unplugged\n");
                        /*
                         * Service drivers are unbound during
                         * tb_xdomain_remove() so setting XDomain as
                         * unplugged here prevents deadlock if they call
                         * tb_xdomain_disable_paths(). We will tear down
                         * the path below.
                         */
                        xd->is_unplugged = true;
                        tb_xdomain_remove(xd);
                        port->xdomain = NULL;
                        __tb_disconnect_xdomain_paths(tb, xd);
                        tb_xdomain_put(xd);
                } else if (tb_port_is_dpout(port)) {
                        tb_teardown_dp(tb, port);
                } else {
                        tb_port_dbg(port,
                                    "got unplug event for disconnected port, ignoring\n");
                }
        } else if (port->remote) {
                tb_port_dbg(port, "got plug event for connected port, ignoring\n");
        } else {
                if (tb_port_is_null(port)) {
                        tb_port_dbg(port, "hotplug: scanning\n");
                        tb_scan_port(port);
                        if (!port->remote)
                                tb_port_dbg(port, "hotplug: no switch found\n");
                } else if (tb_port_is_dpout(port)) {
                        tb_tunnel_dp(tb, port);
                }
        }

put_sw:
        tb_switch_put(sw);
out:
        mutex_unlock(&tb->lock);
        kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                            const void *buf, size_t size)
{
        const struct cfg_event_pkg *pkg = buf;
        u64 route;

        if (type != TB_CFG_PKG_EVENT) {
                tb_warn(tb, "unexpected event %#x, ignoring\n", type);
                return;
        }

        route = tb_cfg_get_route(&pkg->header);

        if (tb_cfg_error(tb->ctl, route, pkg->port,
                         TB_CFG_ERROR_ACK_PLUG_EVENT)) {
                tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
                        pkg->port);
        }

        tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

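/*
 * tb_stop() - shut down the connection manager
 *
 * Tears down DMA tunnels (other protocol tunnels are left intact),
 * frees all tunnel structures, removes the switch tree and stops
 * hotplug handling.
 */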
static void tb_stop(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        /* tunnels are only present after everything has been initialized */
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                /*
                 * DMA tunnels require the driver to be functional so we
                 * tear them down. Other protocol tunnels can be left
                 * intact.
                 */
                if (tb_tunnel_is_dma(tunnel))
                        tb_tunnel_deactivate(tunnel);
                tb_tunnel_free(tunnel);
        }
        tb_switch_remove(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

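/*
 * tb_scan_finalize_switch() - make a discovered switch visible to userspace
 *
 * Marks switches set up by the boot firmware as authorized, lifts the
 * uevent suppression enabled during discovery and sends KOBJ_ADD, then
 * repeats for all child switches.
 */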
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
        if (tb_is_switch(dev)) {
                struct tb_switch *sw = tb_to_switch(dev);

                /*
                 * If we found that the switch was already set up by the
                 * boot firmware, mark it as authorized now before we
                 * send uevent to userspace.
                 */
                if (sw->boot)
                        sw->authorized = 1;

                dev_set_uevent_suppress(dev, false);
                kobject_uevent(&dev->kobj, KOBJ_ADD);
                device_for_each_child(dev, NULL, tb_scan_finalize_switch);
        }

        return 0;
}

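/*
 * tb_start() - start the native connection manager
 *
 * Allocates and configures the root switch, scans the topology for
 * devices connected before the driver was loaded, discovers tunnels
 * created by the boot firmware and finally enables hotplug handling.
 */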
static int tb_start(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        int ret;

        tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (IS_ERR(tb->root_switch))
                return PTR_ERR(tb->root_switch);

        /*
         * ICM firmware upgrade needs running firmware and in native
         * mode that is not available, so disable firmware upgrade of
         * the root switch.
         */
        tb->root_switch->no_nvm_upgrade = true;

        ret = tb_switch_configure(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Announce the switch to the world */
        ret = tb_switch_add(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Full scan to discover devices added before the driver was loaded. */
        tb_scan_switch(tb->root_switch);
        /* Find out tunnels created by the boot firmware */
        tb_discover_tunnels(tb->root_switch);
        /* Make the discovered switches available to userspace */
        device_for_each_child(&tb->root_switch->dev, NULL,
                              tb_scan_finalize_switch);

        /* Allow tb_handle_hotplug to process events */
        tcm->hotplug_active = true;
        return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        tb_dbg(tb, "suspending...\n");
        tb_switch_suspend(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
        tb_dbg(tb, "suspend finished\n");

        return 0;
}

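/*
 * tb_restore_children() - re-enable lane bonding after resume
 *
 * Walks the topology and re-enables lane bonding for every connected
 * child switch.
 */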
static void tb_restore_children(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port))
                        continue;

                if (tb_switch_lane_bonding_enable(port->remote->sw))
                        dev_warn(&sw->dev, "failed to restore lane bonding\n");

                tb_restore_children(port->remote->sw);
        }
}

static int tb_resume_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel, *n;

        tb_dbg(tb, "resuming...\n");

        /* remove any PCI devices the firmware might have set up */
        tb_switch_reset(tb, 0);

        tb_switch_resume(tb->root_switch);
        tb_free_invalid_tunnels(tb);
        tb_free_unplugged_children(tb->root_switch);
        tb_restore_children(tb->root_switch);
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
                tb_tunnel_restart(tunnel);
        if (!list_empty(&tcm->tunnel_list)) {
                /*
                 * The PCIe links need some time to come back up after
                 * the tunnels are restarted; 100 ms has proven to be
                 * enough in practice.
                 */
                tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
                msleep(100);
        }
        /* Allow tb_handle_hotplug to process events */
        tcm->hotplug_active = true;
        tb_dbg(tb, "resume finished\n");

        return 0;
}

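/*
 * tb_free_unplugged_xdomains() - remove XDomains that went away during sleep
 *
 * Walks the topology and removes every XDomain that is marked as
 * unplugged. Returns the number of XDomains removed.
 */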
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
        struct tb_port *port;
        int ret = 0;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->xdomain && port->xdomain->is_unplugged) {
                        tb_xdomain_remove(port->xdomain);
                        port->xdomain = NULL;
                        ret++;
                } else if (port->remote) {
                        ret += tb_free_unplugged_xdomains(port->remote->sw);
                }
        }

        return ret;
}

static void tb_complete(struct tb *tb)
{
        /*
         * Release any unplugged XDomains. If another domain was
         * swapped in place of an unplugged XDomain, run another rescan
         * to pick it up.
         */
        mutex_lock(&tb->lock);
        if (tb_free_unplugged_xdomains(tb->root_switch))
                tb_scan_switch(tb->root_switch);
        mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
        .start = tb_start,
        .stop = tb_stop,
        .suspend_noirq = tb_suspend_noirq,
        .resume_noirq = tb_resume_noirq,
        .complete = tb_complete,
        .handle_event = tb_handle_event,
        .approve_switch = tb_tunnel_pci,
        .approve_xdomain_paths = tb_approve_xdomain_paths,
        .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

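/*
 * tb_probe() - probe for the native (software) connection manager
 *
 * Returns a new Thunderbolt domain with tb_cm_ops installed, or NULL
 * on non-Apple systems where this connection manager is not used.
 */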
struct tb *tb_probe(struct tb_nhi *nhi)
{
        struct tb_cm *tcm;
        struct tb *tb;

        if (!x86_apple_machine)
                return NULL;

        tb = tb_domain_alloc(nhi, sizeof(*tcm));
        if (!tb)
                return NULL;

        tb->security_level = TB_SECURITY_USER;
        tb->cm_ops = &tb_cm_ops;

        tcm = tb_priv(tb);
        INIT_LIST_HEAD(&tcm->tunnel_list);

        return tb;
}