// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *                  events and exit if this is not set (it needs to
 *                  acquire the lock one more time). Used to drain wq
 *                  after cfg has been paused.
 */
struct tb_cm {
        struct list_head tunnel_list;
        bool hotplug_active;
};

struct tb_hotplug_event {
        struct work_struct work;
        struct tb *tb;
        u64 route;
        u8 port;
        bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

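/*
 * tb_queue_hotplug() - defer a hotplug event to the domain workqueue
 *
 * Allocates a struct tb_hotplug_event, fills in the route, port and
 * plug/unplug direction, and queues it on tb->wq so that
 * tb_handle_hotplug() runs in process context. If the allocation
 * fails the event is silently dropped.
 */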
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
        struct tb_hotplug_event *ev;

        ev = kmalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return;

        ev->tb = tb;
        ev->route = route;
        ev->port = port;
        ev->unplug = unplug;
        INIT_WORK(&ev->work, tb_handle_hotplug);
        queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

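/*
 * tb_discover_tunnels() - pick up tunnels created by the boot firmware
 *
 * Walks every adapter of @sw looking for DP and PCIe tunnels that the
 * boot firmware has already set up and adds them to the connection
 * manager's tunnel list. Switches along each discovered PCIe tunnel
 * are marked as boot-configured. Recurses into all connected child
 * switches.
 */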
static void tb_discover_tunnels(struct tb_switch *sw)
{
        struct tb *tb = sw->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                struct tb_tunnel *tunnel = NULL;

                switch (port->config.type) {
                case TB_TYPE_DP_HDMI_IN:
                        tunnel = tb_tunnel_discover_dp(tb, port);
                        break;

                case TB_TYPE_PCIE_DOWN:
                        tunnel = tb_tunnel_discover_pci(tb, port);
                        break;

                default:
                        break;
                }

                if (!tunnel)
                        continue;

                if (tb_tunnel_is_pci(tunnel)) {
                        struct tb_switch *parent = tunnel->dst_port->sw;

                        while (parent != tunnel->src_port->sw) {
                                parent->boot = true;
                                parent = tb_switch_parent(parent);
                        }
                }

                list_add_tail(&tunnel->list, &tcm->tunnel_list);
        }

        tb_switch_for_each_port(sw, port) {
                if (tb_port_has_remote(port))
                        tb_discover_tunnels(port->remote->sw);
        }
}

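/*
 * tb_scan_xdomain() - register a host-to-host (XDomain) connection
 *
 * Called when the device behind @port could not be enumerated as a
 * switch, which typically means another Thunderbolt domain is
 * connected there. If no XDomain is known for the downstream route
 * yet, allocate and register one.
 */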
static void tb_scan_xdomain(struct tb_port *port)
{
        struct tb_switch *sw = port->sw;
        struct tb *tb = sw->tb;
        struct tb_xdomain *xd;
        u64 route;

        route = tb_downstream_route(port);
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                tb_xdomain_put(xd);
                return;
        }

        xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
                              NULL);
        if (xd) {
                tb_port_at(route, sw)->xdomain = xd;
                tb_xdomain_add(xd);
        }
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port)
                tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
        struct tb_cm *tcm = tb_priv(port->sw->tb);
        struct tb_port *upstream_port;
        struct tb_switch *sw;

        if (tb_is_upstream_port(port))
                return;

        if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
            !tb_dp_port_is_enabled(port)) {
                tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
                tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
                                 false);
                return;
        }

        if (port->config.type != TB_TYPE_PORT)
                return;
        if (port->dual_link_port && port->link_nr)
                return; /*
                         * Downstream switch is reachable through two ports.
                         * Only scan on the primary port (link_nr == 0).
                         */
        if (tb_wait_for_port(port, false) <= 0)
                return;
        if (port->remote) {
                tb_port_dbg(port, "port already has a remote\n");
                return;
        }
        sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
                             tb_downstream_route(port));
        if (IS_ERR(sw)) {
                /*
                 * If there is an error accessing the connected switch
                 * it may be connected to another domain. Also we allow
                 * the other domain to be connected to a max depth switch.
                 */
                if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
                        tb_scan_xdomain(port);
                return;
        }

        if (tb_switch_configure(sw)) {
                tb_switch_put(sw);
                return;
        }

        /*
         * If there was previously another domain connected remove it
         * first.
         */
        if (port->xdomain) {
                tb_xdomain_remove(port->xdomain);
                port->xdomain = NULL;
        }

        /*
         * Do not send uevents until we have discovered all existing
         * tunnels and know which switches were authorized already by
         * the boot firmware.
         */
        if (!tcm->hotplug_active)
                dev_set_uevent_suppress(&sw->dev, true);

        if (tb_switch_add(sw)) {
                tb_switch_put(sw);
                return;
        }

        /* Link the switches using both links if available */
        upstream_port = tb_upstream_port(sw);
        port->remote = upstream_port;
        upstream_port->remote = port;
        if (port->dual_link_port && upstream_port->dual_link_port) {
                port->dual_link_port->remote = upstream_port->dual_link_port;
                upstream_port->dual_link_port->remote = port->dual_link_port;
        }

        /* Enable lane bonding if supported */
        if (tb_switch_lane_bonding_enable(sw))
                tb_sw_warn(sw, "failed to enable lane bonding\n");

        tb_scan_switch(sw);
}

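/*
 * tb_free_tunnel() - deactivate and free a matching tunnel
 *
 * Looks up the first tunnel of @type whose source port matches
 * @src_port or whose destination port matches @dst_port (a NULL port
 * is not matched against), deactivates it and removes it from the
 * tunnel list. Returns %0 on success or %-ENODEV if no such tunnel
 * was found.
 */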
static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
                          struct tb_port *src_port, struct tb_port *dst_port)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;

        list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
                if (tunnel->type == type &&
                    ((src_port && src_port == tunnel->src_port) ||
                     (dst_port && dst_port == tunnel->dst_port))) {
                        tb_tunnel_deactivate(tunnel);
                        list_del(&tunnel->list);
                        tb_tunnel_free(tunnel);
                        return 0;
                }
        }

        return -ENODEV;
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                if (tb_tunnel_is_invalid(tunnel)) {
                        tb_tunnel_deactivate(tunnel);
                        list_del(&tunnel->list);
                        tb_tunnel_free(tunnel);
                }
        }
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port))
                        continue;

                if (port->remote->sw->is_unplugged) {
                        tb_switch_lane_bonding_disable(port->remote->sw);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                } else {
                        tb_free_unplugged_children(port->remote->sw);
                }
        }
}

/**
 * tb_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
static struct tb_port *tb_find_port(struct tb_switch *sw,
                                    enum tb_port_type type)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (port->config.type == type)
                        return port;
        }

        return NULL;
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
                                           enum tb_port_type type)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->config.type != type)
                        continue;
                if (port->cap_adap)
                        continue;
                if (tb_port_is_enabled(port))
                        continue;
                return port;
        }
        return NULL;
}

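/*
 * tb_find_pcie_down() - pick the PCIe downstream adapter for a device
 *
 * For the root switch the Thunderbolt port is mapped to a fixed PCIe
 * downstream adapter per controller generation so that devices always
 * end up in the same PCIe hierarchy. For all other switches the first
 * unused PCIe downstream adapter is used.
 */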
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
                                         const struct tb_port *port)
{
        /*
         * To keep plugging devices consistently in the same PCIe
         * hierarchy, do mapping here for root switch downstream PCIe
         * ports.
         */
        if (!tb_route(sw)) {
                int phy_port = tb_phy_port_from_link(port->port);
                int index;

                /*
                 * Hard-coded Thunderbolt port to PCIe down port mapping
                 * per controller.
                 */
                if (tb_switch_is_cactus_ridge(sw) ||
                    tb_switch_is_alpine_ridge(sw))
                        index = !phy_port ? 6 : 7;
                else if (tb_switch_is_falcon_ridge(sw))
                        index = !phy_port ? 6 : 8;
                else if (tb_switch_is_titan_ridge(sw))
                        index = !phy_port ? 8 : 9;
                else
                        goto out;

                /* Validate the hard-coding */
                if (WARN_ON(index > sw->config.max_port_number))
                        goto out;
                if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
                        goto out;
                if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
                        goto out;

                return &sw->ports[index];
        }

out:
        return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

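/*
 * tb_tunnel_dp() - set up a DisplayPort tunnel to DP OUT adapter @out
 *
 * Walks up the topology from @out towards the host looking for an
 * unused DP IN adapter, then allocates and activates a DP tunnel
 * between the two adapters. Does nothing if @out is already enabled.
 */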
static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw = out->sw;
        struct tb_tunnel *tunnel;
        struct tb_port *in;

        if (tb_port_is_enabled(out))
                return 0;

        do {
                sw = tb_to_switch(sw->dev.parent);
                if (!sw)
                        return 0;
                in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
        } while (!in);

        tunnel = tb_tunnel_alloc_dp(tb, in, out);
        if (!tunnel) {
                tb_port_dbg(out, "DP tunnel allocation failed\n");
                return -ENOMEM;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(out, "DP tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}

static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
{
        tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
}

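/*
 * tb_tunnel_pci() - set up a PCIe tunnel for an approved switch
 *
 * Used as the ->approve_switch callback. Creates and activates a PCIe
 * tunnel between the PCIe upstream adapter of @sw and a PCIe
 * downstream adapter of its parent switch. Returns %0 without doing
 * anything if @sw has no PCIe upstream adapter or no downstream
 * adapter is available.
 */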
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
        struct tb_port *up, *down, *port;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *parent_sw;
        struct tb_tunnel *tunnel;

        up = tb_find_port(sw, TB_TYPE_PCIE_UP);
        if (!up)
                return 0;

        /*
         * Look up available down port. Since we are chaining it should
         * be found right above this switch.
         */
        parent_sw = tb_to_switch(sw->dev.parent);
        port = tb_port_at(tb_route(sw), parent_sw);
        down = tb_find_pcie_down(parent_sw, port);
        if (!down)
                return 0;

        tunnel = tb_tunnel_alloc_pci(tb, up, down);
        if (!tunnel)
                return -ENOMEM;

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(up,
                             "PCIe tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}

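/*
 * tb_approve_xdomain_paths() - enable DMA paths to another domain
 *
 * Creates and activates a DMA tunnel between the host NHI adapter and
 * the port where the XDomain @xd is connected, using the transmit and
 * receive rings/paths negotiated with the remote host.
 */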
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *nhi_port, *dst_port;
        struct tb_tunnel *tunnel;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);
        nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);

        mutex_lock(&tb->lock);
        tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
                                     xd->transmit_path, xd->receive_ring,
                                     xd->receive_path);
        if (!tunnel) {
                mutex_unlock(&tb->lock);
                return -ENOMEM;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(nhi_port,
                             "DMA tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                mutex_unlock(&tb->lock);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        mutex_unlock(&tb->lock);
        return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_port *dst_port;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);

        /*
         * It is possible that the tunnel was already torn down (in
         * case of cable disconnect) so it is fine if we cannot find it
         * here anymore.
         */
        tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!xd->is_unplugged) {
                mutex_lock(&tb->lock);
                __tb_disconnect_xdomain_paths(tb, xd);
                mutex_unlock(&tb->lock);
        }
        return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
        struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
        struct tb *tb = ev->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw;
        struct tb_port *port;

        mutex_lock(&tb->lock);
        if (!tcm->hotplug_active)
                goto out; /* during init, suspend or shutdown */

        sw = tb_switch_find_by_route(tb, ev->route);
        if (!sw) {
                tb_warn(tb,
                        "hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto out;
        }
        if (ev->port > sw->config.max_port_number) {
                tb_warn(tb,
                        "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        port = &sw->ports[ev->port];
        if (tb_is_upstream_port(port)) {
                tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
                       ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        if (ev->unplug) {
                if (tb_port_has_remote(port)) {
                        tb_port_dbg(port, "switch unplugged\n");
                        tb_sw_set_unplugged(port->remote->sw);
                        tb_free_invalid_tunnels(tb);
                        tb_switch_lane_bonding_disable(port->remote->sw);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                } else if (port->xdomain) {
                        struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

                        tb_port_dbg(port, "xdomain unplugged\n");
                        /*
                         * Service drivers are unbound during
                         * tb_xdomain_remove() so setting XDomain as
                         * unplugged here prevents deadlock if they call
                         * tb_xdomain_disable_paths(). We will tear down
                         * the path below.
                         */
                        xd->is_unplugged = true;
                        tb_xdomain_remove(xd);
                        port->xdomain = NULL;
                        __tb_disconnect_xdomain_paths(tb, xd);
                        tb_xdomain_put(xd);
                } else if (tb_port_is_dpout(port)) {
                        tb_teardown_dp(tb, port);
                } else {
                        tb_port_dbg(port,
                                    "got unplug event for disconnected port, ignoring\n");
                }
        } else if (port->remote) {
                tb_port_dbg(port, "got plug event for connected port, ignoring\n");
        } else {
                if (tb_port_is_null(port)) {
                        tb_port_dbg(port, "hotplug: scanning\n");
                        tb_scan_port(port);
                        if (!port->remote)
                                tb_port_dbg(port, "hotplug: no switch found\n");
                } else if (tb_port_is_dpout(port)) {
                        tb_tunnel_dp(tb, port);
                }
        }

put_sw:
        tb_switch_put(sw);
out:
        mutex_unlock(&tb->lock);
        kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates hotplug events to tb_handle_hotplug().
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                            const void *buf, size_t size)
{
        const struct cfg_event_pkg *pkg = buf;
        u64 route;

        if (type != TB_CFG_PKG_EVENT) {
                tb_warn(tb, "unexpected event %#x, ignoring\n", type);
                return;
        }

        route = tb_cfg_get_route(&pkg->header);

        if (tb_cfg_error(tb->ctl, route, pkg->port,
                         TB_CFG_ERROR_ACK_PLUG_EVENT)) {
                tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
                        pkg->port);
        }

        tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

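/*
 * tb_stop() - stop the connection manager
 *
 * Deactivates DMA tunnels (other protocol tunnels are left intact as
 * they do not need the driver), frees all tunnel structures, removes
 * the root switch and signals tb_handle_hotplug() to stop processing
 * events.
 */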
static void tb_stop(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        /* tunnels are only present after everything has been initialized */
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                /*
                 * DMA tunnels require the driver to be functional so we
                 * tear them down. Other protocol tunnels can be left
                 * intact.
                 */
                if (tb_tunnel_is_dma(tunnel))
                        tb_tunnel_deactivate(tunnel);
                tb_tunnel_free(tunnel);
        }
        tb_switch_remove(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
        if (tb_is_switch(dev)) {
                struct tb_switch *sw = tb_to_switch(dev);

                /*
                 * If we found that the switch was already set up by the
                 * boot firmware, mark it as authorized now before we
                 * send uevent to userspace.
                 */
                if (sw->boot)
                        sw->authorized = 1;

                dev_set_uevent_suppress(dev, false);
                kobject_uevent(&dev->kobj, KOBJ_ADD);
                device_for_each_child(dev, NULL, tb_scan_finalize_switch);
        }

        return 0;
}

static int tb_start(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        int ret;

        tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (IS_ERR(tb->root_switch))
                return PTR_ERR(tb->root_switch);

        /*
         * ICM firmware upgrade needs running firmware and in native
         * mode that is not available, so disable firmware upgrade of
         * the root switch.
         */
        tb->root_switch->no_nvm_upgrade = true;

        ret = tb_switch_configure(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Announce the switch to the world */
        ret = tb_switch_add(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Full scan to discover devices added before the driver was loaded. */
        tb_scan_switch(tb->root_switch);
        /* Find out tunnels created by the boot firmware */
        tb_discover_tunnels(tb->root_switch);
        /* Make the discovered switches available to userspace */
        device_for_each_child(&tb->root_switch->dev, NULL,
                              tb_scan_finalize_switch);

        /* Allow tb_handle_hotplug to progress events */
        tcm->hotplug_active = true;
        return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        tb_dbg(tb, "suspending...\n");
        tb_switch_suspend(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
        tb_dbg(tb, "suspend finished\n");

        return 0;
}

static void tb_restore_children(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port))
                        continue;

                if (tb_switch_lane_bonding_enable(port->remote->sw))
                        dev_warn(&sw->dev, "failed to restore lane bonding\n");

                tb_restore_children(port->remote->sw);
        }
}

static int tb_resume_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel, *n;

        tb_dbg(tb, "resuming...\n");

        /* remove any PCIe devices the firmware might have set up */
        tb_switch_reset(tb, 0);

        tb_switch_resume(tb->root_switch);
        tb_free_invalid_tunnels(tb);
        tb_free_unplugged_children(tb->root_switch);
        tb_restore_children(tb->root_switch);
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
                tb_tunnel_restart(tunnel);
        if (!list_empty(&tcm->tunnel_list)) {
                /*
                 * The PCIe links need some time to get going.
                 * 100ms works for me...
                 */
                tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
                msleep(100);
        }
        /* Allow tb_handle_hotplug to progress events */
        tcm->hotplug_active = true;
        tb_dbg(tb, "resume finished\n");

        return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
        struct tb_port *port;
        int ret = 0;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->xdomain && port->xdomain->is_unplugged) {
                        tb_xdomain_remove(port->xdomain);
                        port->xdomain = NULL;
                        ret++;
                } else if (port->remote) {
                        ret += tb_free_unplugged_xdomains(port->remote->sw);
                }
        }

        return ret;
}

static void tb_complete(struct tb *tb)
{
        /*
         * Release any unplugged XDomains. If another domain has been
         * swapped in place of an unplugged XDomain we need to run
         * another rescan.
         */
        mutex_lock(&tb->lock);
        if (tb_free_unplugged_xdomains(tb->root_switch))
                tb_scan_switch(tb->root_switch);
        mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
        .start = tb_start,
        .stop = tb_stop,
        .suspend_noirq = tb_suspend_noirq,
        .resume_noirq = tb_resume_noirq,
        .complete = tb_complete,
        .handle_event = tb_handle_event,
        .approve_switch = tb_tunnel_pci,
        .approve_xdomain_paths = tb_approve_xdomain_paths,
        .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

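/*
 * tb_probe() - create a domain using the software connection manager
 *
 * This software connection manager is only used on Apple hardware
 * (x86_apple_machine); on everything else NULL is returned. The
 * domain is created with user security level and the tb_cm_ops
 * callbacks above.
 */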
struct tb *tb_probe(struct tb_nhi *nhi)
{
        struct tb_cm *tcm;
        struct tb *tb;

        if (!x86_apple_machine)
                return NULL;

        tb = tb_domain_alloc(nhi, sizeof(*tcm));
        if (!tb)
                return NULL;

        tb->security_level = TB_SECURITY_USER;
        tb->cm_ops = &tb_cm_ops;

        tcm = tb_priv(tb);
        INIT_LIST_HEAD(&tcm->tunnel_list);

        return tb;
}