// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

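/*
 * tb_switch_for_each_port() used throughout this file is defined in tb.h.
 * Conceptually it is a plain iterator over the switch port array, roughly
 * along these lines (sketch only, the real definition may differ):
 *
 *	#define tb_switch_for_each_port(sw, p)				\
 *		for ((p) = &(sw)->ports[0];				\
 *		     (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)
 */
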
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop processing plug
 *                  events and exit if this is not set (it needs to
 *                  acquire the lock one more time). Used to drain wq
 *                  after cfg has been paused.
 */
struct tb_cm {
        struct list_head tunnel_list;
        bool hotplug_active;
};

struct tb_hotplug_event {
        struct work_struct work;
        struct tb *tb;
        u64 route;
        u8 port;
        bool unplug;
};
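
/*
 * Hotplug events are allocated in tb_queue_hotplug(), processed on the
 * domain workqueue (tb->wq) by tb_handle_hotplug() and freed there once
 * handled.
 */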

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
        struct tb_hotplug_event *ev;

        ev = kmalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return;

        ev->tb = tb;
        ev->route = route;
        ev->port = port;
        ev->unplug = unplug;
        INIT_WORK(&ev->work, tb_handle_hotplug);
        queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_discover_tunnels(struct tb_switch *sw)
{
        struct tb *tb = sw->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                struct tb_tunnel *tunnel = NULL;

                switch (port->config.type) {
                case TB_TYPE_DP_HDMI_IN:
                        tunnel = tb_tunnel_discover_dp(tb, port);
                        break;

                case TB_TYPE_PCIE_DOWN:
                        tunnel = tb_tunnel_discover_pci(tb, port);
                        break;

                default:
                        break;
                }

                if (!tunnel)
                        continue;

                if (tb_tunnel_is_pci(tunnel)) {
                        struct tb_switch *parent = tunnel->dst_port->sw;

                        while (parent != tunnel->src_port->sw) {
                                parent->boot = true;
                                parent = tb_switch_parent(parent);
                        }
                }

                list_add_tail(&tunnel->list, &tcm->tunnel_list);
        }

        tb_switch_for_each_port(sw, port) {
                if (tb_port_has_remote(port))
                        tb_discover_tunnels(port->remote->sw);
        }
}

static void tb_scan_xdomain(struct tb_port *port)
{
        struct tb_switch *sw = port->sw;
        struct tb *tb = sw->tb;
        struct tb_xdomain *xd;
        u64 route;

        route = tb_downstream_route(port);
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                tb_xdomain_put(xd);
                return;
        }

        xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
                              NULL);
        if (xd) {
                tb_port_at(route, sw)->xdomain = xd;
                tb_xdomain_add(xd);
        }
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port)
                tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
        struct tb_cm *tcm = tb_priv(port->sw->tb);
        struct tb_port *upstream_port;
        struct tb_switch *sw;

        if (tb_is_upstream_port(port))
                return;

        if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
            !tb_dp_port_is_enabled(port)) {
                tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
                tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
                                 false);
                return;
        }

        if (port->config.type != TB_TYPE_PORT)
                return;
        if (port->dual_link_port && port->link_nr) {
                /*
                 * Downstream switch is reachable through two ports.
                 * Only scan on the primary port (link_nr == 0).
                 */
                return;
        }
        if (tb_wait_for_port(port, false) <= 0)
                return;
        if (port->remote) {
                tb_port_dbg(port, "port already has a remote\n");
                return;
        }
        sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
                             tb_downstream_route(port));
        if (IS_ERR(sw)) {
                /*
                 * If there is an error accessing the connected switch
                 * it may be connected to another domain. We also allow
                 * the other domain to be connected to a switch at
                 * maximum depth.
                 */
                if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
                        tb_scan_xdomain(port);
                return;
        }

        if (tb_switch_configure(sw)) {
                tb_switch_put(sw);
                return;
        }

        /*
         * If there was previously another domain connected remove it
         * first.
         */
        if (port->xdomain) {
                tb_xdomain_remove(port->xdomain);
                port->xdomain = NULL;
        }

        /*
         * Do not send uevents until we have discovered all existing
         * tunnels and know which switches were authorized already by
         * the boot firmware.
         */
        if (!tcm->hotplug_active)
                dev_set_uevent_suppress(&sw->dev, true);

        if (tb_switch_add(sw)) {
                tb_switch_put(sw);
                return;
        }

        /* Link the switches using both links if available */
        upstream_port = tb_upstream_port(sw);
        port->remote = upstream_port;
        upstream_port->remote = port;
        if (port->dual_link_port && upstream_port->dual_link_port) {
                port->dual_link_port->remote = upstream_port->dual_link_port;
                upstream_port->dual_link_port->remote = port->dual_link_port;
        }

        tb_scan_switch(sw);
}

static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
                          struct tb_port *src_port, struct tb_port *dst_port)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;

        list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
                if (tunnel->type == type &&
                    ((src_port && src_port == tunnel->src_port) ||
                     (dst_port && dst_port == tunnel->dst_port))) {
                        tb_tunnel_deactivate(tunnel);
                        list_del(&tunnel->list);
                        tb_tunnel_free(tunnel);
                        return 0;
                }
        }

        return -ENODEV;
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                if (tb_tunnel_is_invalid(tunnel)) {
                        tb_tunnel_deactivate(tunnel);
                        list_del(&tunnel->list);
                        tb_tunnel_free(tunnel);
                }
        }
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port))
                        continue;

                if (port->remote->sw->is_unplugged) {
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                } else {
                        tb_free_unplugged_children(port->remote->sw);
                }
        }
}

/**
 * tb_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
static struct tb_port *tb_find_port(struct tb_switch *sw,
                                    enum tb_port_type type)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (port->config.type == type)
                        return port;
        }

        return NULL;
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
                                           enum tb_port_type type)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->config.type != type)
                        continue;
                if (port->cap_adap)
                        continue;
                if (tb_port_is_enabled(port))
                        continue;
                return port;
        }
        return NULL;
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
                                         const struct tb_port *port)
{
        /*
         * To keep plugging devices consistently in the same PCIe
         * hierarchy, do mapping here for root switch downstream PCIe
         * ports.
         */
        if (!tb_route(sw)) {
                int phy_port = tb_phy_port_from_link(port->port);
                int index;

                /*
                 * Hard-coded Thunderbolt port to PCIe down port mapping
                 * per controller.
                 */
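                /*
                 * That is: phy port 0 maps to downstream PCIe port 6 on
                 * both controllers, phy port 1 maps to port 7 or port 8
                 * depending on the controller.
                 */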
                if (tb_switch_is_cr(sw))
                        index = !phy_port ? 6 : 7;
                else if (tb_switch_is_fr(sw))
                        index = !phy_port ? 6 : 8;
                else
                        goto out;

                /* Validate the hard-coding */
                if (WARN_ON(index > sw->config.max_port_number))
                        goto out;
                if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
                        goto out;
                if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
                        goto out;

                return &sw->ports[index];
        }

out:
        return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw = out->sw;
        struct tb_tunnel *tunnel;
        struct tb_port *in;

        if (tb_port_is_enabled(out))
                return 0;

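        /*
         * Walk up the topology from the switch holding the DP OUT adapter
         * towards the root switch until a switch with an unused DP IN
         * adapter is found; give up if we run out of parents.
         */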
        do {
                sw = tb_to_switch(sw->dev.parent);
                if (!sw)
                        return 0;
                in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
        } while (!in);

        tunnel = tb_tunnel_alloc_dp(tb, in, out);
        if (!tunnel) {
                tb_port_dbg(out, "DP tunnel allocation failed\n");
                return -ENOMEM;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(out, "DP tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}

static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
{
        tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
}

static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
        struct tb_port *up, *down, *port;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *parent_sw;
        struct tb_tunnel *tunnel;

        up = tb_find_port(sw, TB_TYPE_PCIE_UP);
        if (!up)
                return 0;

        /*
         * Look up the available down port. Since we are chaining, it
         * should be found right above this switch.
         */
        parent_sw = tb_to_switch(sw->dev.parent);
        port = tb_port_at(tb_route(sw), parent_sw);
        down = tb_find_pcie_down(parent_sw, port);
        if (!down)
                return 0;

        tunnel = tb_tunnel_alloc_pci(tb, up, down);
        if (!tunnel)
                return -ENOMEM;

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(up,
                             "PCIe tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *nhi_port, *dst_port;
        struct tb_tunnel *tunnel;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);
        nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);

        mutex_lock(&tb->lock);
        tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
                                     xd->transmit_path, xd->receive_ring,
                                     xd->receive_path);
        if (!tunnel) {
                mutex_unlock(&tb->lock);
                return -ENOMEM;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(nhi_port,
                             "DMA tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                mutex_unlock(&tb->lock);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        mutex_unlock(&tb->lock);
        return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_port *dst_port;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);

        /*
         * It is possible that the tunnel was already torn down (in
         * case of cable disconnect) so it is fine if we cannot find it
         * here anymore.
         */
        tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!xd->is_unplugged) {
                mutex_lock(&tb->lock);
                __tb_disconnect_xdomain_paths(tb, xd);
                mutex_unlock(&tb->lock);
        }
        return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
        struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
        struct tb *tb = ev->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw;
        struct tb_port *port;

        mutex_lock(&tb->lock);
        if (!tcm->hotplug_active)
                goto out; /* during init, suspend or shutdown */

        sw = tb_switch_find_by_route(tb, ev->route);
        if (!sw) {
                tb_warn(tb,
                        "hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto out;
        }
        if (ev->port > sw->config.max_port_number) {
                tb_warn(tb,
                        "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        port = &sw->ports[ev->port];
        if (tb_is_upstream_port(port)) {
                tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
                       ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        if (ev->unplug) {
                if (tb_port_has_remote(port)) {
                        tb_port_dbg(port, "switch unplugged\n");
                        tb_sw_set_unplugged(port->remote->sw);
                        tb_free_invalid_tunnels(tb);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                } else if (port->xdomain) {
                        struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

                        tb_port_dbg(port, "xdomain unplugged\n");
                        /*
                         * Service drivers are unbound during
                         * tb_xdomain_remove() so setting XDomain as
                         * unplugged here prevents deadlock if they call
                         * tb_xdomain_disable_paths(). We will tear down
                         * the path below.
                         */
                        xd->is_unplugged = true;
                        tb_xdomain_remove(xd);
                        port->xdomain = NULL;
                        __tb_disconnect_xdomain_paths(tb, xd);
                        tb_xdomain_put(xd);
                } else if (tb_port_is_dpout(port)) {
                        tb_teardown_dp(tb, port);
                } else {
                        tb_port_dbg(port,
                                    "got unplug event for disconnected port, ignoring\n");
                }
        } else if (port->remote) {
                tb_port_dbg(port, "got plug event for connected port, ignoring\n");
        } else {
                if (tb_port_is_null(port)) {
                        tb_port_dbg(port, "hotplug: scanning\n");
                        tb_scan_port(port);
                        if (!port->remote)
                                tb_port_dbg(port, "hotplug: no switch found\n");
                } else if (tb_port_is_dpout(port)) {
                        tb_tunnel_dp(tb, port);
                }
        }

put_sw:
        tb_switch_put(sw);
out:
        mutex_unlock(&tb->lock);
        kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates hotplug events to tb_handle_hotplug() via tb_queue_hotplug().
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                            const void *buf, size_t size)
{
        const struct cfg_event_pkg *pkg = buf;
        u64 route;

        if (type != TB_CFG_PKG_EVENT) {
                tb_warn(tb, "unexpected event %#x, ignoring\n", type);
                return;
        }

        route = tb_cfg_get_route(&pkg->header);

        if (tb_cfg_error(tb->ctl, route, pkg->port,
                         TB_CFG_ERROR_ACK_PLUG_EVENT)) {
                tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
                        pkg->port);
        }

        tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        /* tunnels are only present after everything has been initialized */
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                /*
                 * DMA tunnels require the driver to be functional so we
                 * tear them down. Other protocol tunnels can be left
                 * intact.
                 */
                if (tb_tunnel_is_dma(tunnel))
                        tb_tunnel_deactivate(tunnel);
                tb_tunnel_free(tunnel);
        }
        tb_switch_remove(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
        if (tb_is_switch(dev)) {
                struct tb_switch *sw = tb_to_switch(dev);

                /*
                 * If we found that the switch was already set up by the
                 * boot firmware, mark it as authorized now before we
                 * send the uevent to userspace.
                 */
                if (sw->boot)
                        sw->authorized = 1;

                dev_set_uevent_suppress(dev, false);
                kobject_uevent(&dev->kobj, KOBJ_ADD);
                device_for_each_child(dev, NULL, tb_scan_finalize_switch);
        }

        return 0;
}

static int tb_start(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        int ret;

        tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (IS_ERR(tb->root_switch))
                return PTR_ERR(tb->root_switch);

        /*
         * ICM firmware upgrade needs running firmware, which is not
         * available in native mode, so disable firmware upgrade of the
         * root switch.
         */
        tb->root_switch->no_nvm_upgrade = true;

        ret = tb_switch_configure(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Announce the switch to the world */
        ret = tb_switch_add(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Full scan to discover devices added before the driver was loaded. */
        tb_scan_switch(tb->root_switch);
        /* Find out tunnels created by the boot firmware */
        tb_discover_tunnels(tb->root_switch);
        /* Make the discovered switches available to userspace */
        device_for_each_child(&tb->root_switch->dev, NULL,
                              tb_scan_finalize_switch);

        /* Allow tb_handle_hotplug to process events */
        tcm->hotplug_active = true;
        return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        tb_dbg(tb, "suspending...\n");
        tb_switch_suspend(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
        tb_dbg(tb, "suspend finished\n");

        return 0;
}

static int tb_resume_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel, *n;

        tb_dbg(tb, "resuming...\n");

        /* Remove any PCIe devices the firmware might have set up */
        tb_switch_reset(tb, 0);

        tb_switch_resume(tb->root_switch);
        tb_free_invalid_tunnels(tb);
        tb_free_unplugged_children(tb->root_switch);
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
                tb_tunnel_restart(tunnel);
        if (!list_empty(&tcm->tunnel_list)) {
                /*
                 * The PCIe links need some time to come up again;
                 * 100ms has proven to be enough in practice.
                 */
                tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
                msleep(100);
        }
        /* Allow tb_handle_hotplug to process events */
        tcm->hotplug_active = true;
        tb_dbg(tb, "resume finished\n");

        return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
        struct tb_port *port;
        int ret = 0;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->xdomain && port->xdomain->is_unplugged) {
                        tb_xdomain_remove(port->xdomain);
                        port->xdomain = NULL;
                        ret++;
                } else if (port->remote) {
                        ret += tb_free_unplugged_xdomains(port->remote->sw);
                }
        }

        return ret;
}

static void tb_complete(struct tb *tb)
{
        /*
         * Release any unplugged XDomains. If another domain was swapped
         * in place of an unplugged XDomain we need to run another rescan
         * to pick it up.
         */
        mutex_lock(&tb->lock);
        if (tb_free_unplugged_xdomains(tb->root_switch))
                tb_scan_switch(tb->root_switch);
        mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
        .start = tb_start,
        .stop = tb_stop,
        .suspend_noirq = tb_suspend_noirq,
        .resume_noirq = tb_resume_noirq,
        .complete = tb_complete,
        .handle_event = tb_handle_event,
        .approve_switch = tb_tunnel_pci,
        .approve_xdomain_paths = tb_approve_xdomain_paths,
        .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
        struct tb_cm *tcm;
        struct tb *tb;

        if (!x86_apple_machine)
                return NULL;

        tb = tb_domain_alloc(nhi, sizeof(*tcm));
        if (!tb)
                return NULL;

        tb->security_level = TB_SECURITY_USER;
        tb->cm_ops = &tb_cm_ops;

        tcm = tb_priv(tb);
        INIT_LIST_HEAD(&tcm->tunnel_list);

        return tb;
}
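
/*
 * A minimal usage sketch (assumed caller; the real call site lives in the
 * NHI driver and may differ): the NHI probe path tries the firmware based
 * connection manager first and falls back to this software connection
 * manager when that is not available:
 *
 *	tb = icm_probe(nhi);
 *	if (!tb)
 *		tb = tb_probe(nhi);
 */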