drivers/thunderbolt/tb.c
thunderbolt: Add bandwidth management for Display Port tunnels
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/platform_data/x86/apple.h>
13
14 #include "tb.h"
15 #include "tb_regs.h"
16 #include "tunnel.h"
17
18 /**
19  * struct tb_cm - Simple Thunderbolt connection manager
20  * @tunnel_list: List of active tunnels
21  * @dp_resources: List of available DP resources for DP tunneling
22  * @hotplug_active: tb_handle_hotplug will stop progressing plug
23  *                  events and exit if this is not set (it needs to
24  *                  acquire the lock one more time). Used to drain wq
25  *                  after cfg has been paused.
26  */
27 struct tb_cm {
28         struct list_head tunnel_list;
29         struct list_head dp_resources;
30         bool hotplug_active;
31 };
32
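/**
 * struct tb_hotplug_event - deferred hot plug event
 * @work: Work item that calls tb_handle_hotplug()
 * @tb: Domain the event belongs to
 * @route: Route string of the switch the event came from
 * @port: Port number on that switch
 * @unplug: True for an unplug event, false for a plug event
 */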
33 struct tb_hotplug_event {
34         struct work_struct work;
35         struct tb *tb;
36         u64 route;
37         u8 port;
38         bool unplug;
39 };
40
41 static void tb_handle_hotplug(struct work_struct *work);
42
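/*
 * Allocate a struct tb_hotplug_event for the given event and queue it
 * on the domain workqueue where tb_handle_hotplug() picks it up. If the
 * allocation fails the event is silently dropped.
 */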
43 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
44 {
45         struct tb_hotplug_event *ev;
46
47         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
48         if (!ev)
49                 return;
50
51         ev->tb = tb;
52         ev->route = route;
53         ev->port = port;
54         ev->unplug = unplug;
55         INIT_WORK(&ev->work, tb_handle_hotplug);
56         queue_work(tb->wq, &ev->work);
57 }
58
59 /* enumeration & hot plug handling */
60
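/*
 * Walk all ports of @sw and add every DP IN adapter whose DP resource
 * is reported as available to the connection manager's dp_resources
 * list.
 */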
61 static void tb_add_dp_resources(struct tb_switch *sw)
62 {
63         struct tb_cm *tcm = tb_priv(sw->tb);
64         struct tb_port *port;
65
66         tb_switch_for_each_port(sw, port) {
67                 if (!tb_port_is_dpin(port))
68                         continue;
69
70                 if (!tb_switch_query_dp_resource(sw, port))
71                         continue;
72
73                 list_add_tail(&port->list, &tcm->dp_resources);
74                 tb_port_dbg(port, "DP IN resource available\n");
75         }
76 }
77
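/*
 * Remove all DP resources that belong to @sw, and recursively to the
 * switches connected below it, from the connection manager's
 * dp_resources list.
 */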
78 static void tb_remove_dp_resources(struct tb_switch *sw)
79 {
80         struct tb_cm *tcm = tb_priv(sw->tb);
81         struct tb_port *port, *tmp;
82
83         /* Clear children resources first */
84         tb_switch_for_each_port(sw, port) {
85                 if (tb_port_has_remote(port))
86                         tb_remove_dp_resources(port->remote->sw);
87         }
88
89         list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
90                 if (port->sw == sw) {
91                         tb_port_dbg(port, "DP IN resource unavailable\n");
92                         list_del_init(&port->list);
93                 }
94         }
95 }
96
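/*
 * Discover DP and PCIe tunnels that the boot firmware has already set
 * up below @sw and add them to the tunnel list. Every switch that sits
 * on a discovered PCIe tunnel is marked with the boot flag so that it
 * can be authorized automatically later (see tb_scan_finalize_switch()).
 */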
97 static void tb_discover_tunnels(struct tb_switch *sw)
98 {
99         struct tb *tb = sw->tb;
100         struct tb_cm *tcm = tb_priv(tb);
101         struct tb_port *port;
102
103         tb_switch_for_each_port(sw, port) {
104                 struct tb_tunnel *tunnel = NULL;
105
106                 switch (port->config.type) {
107                 case TB_TYPE_DP_HDMI_IN:
108                         tunnel = tb_tunnel_discover_dp(tb, port);
109                         break;
110
111                 case TB_TYPE_PCIE_DOWN:
112                         tunnel = tb_tunnel_discover_pci(tb, port);
113                         break;
114
115                 default:
116                         break;
117                 }
118
119                 if (!tunnel)
120                         continue;
121
122                 if (tb_tunnel_is_pci(tunnel)) {
123                         struct tb_switch *parent = tunnel->dst_port->sw;
124
125                         while (parent != tunnel->src_port->sw) {
126                                 parent->boot = true;
127                                 parent = tb_switch_parent(parent);
128                         }
129                 }
130
131                 list_add_tail(&tunnel->list, &tcm->tunnel_list);
132         }
133
134         tb_switch_for_each_port(sw, port) {
135                 if (tb_port_has_remote(port))
136                         tb_discover_tunnels(port->remote->sw);
137         }
138 }
139
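/*
 * Check whether the device behind @port belongs to another Thunderbolt
 * domain (host). If no XDomain has been registered for that route yet,
 * allocate and add one.
 */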
140 static void tb_scan_xdomain(struct tb_port *port)
141 {
142         struct tb_switch *sw = port->sw;
143         struct tb *tb = sw->tb;
144         struct tb_xdomain *xd;
145         u64 route;
146
147         route = tb_downstream_route(port);
148         xd = tb_xdomain_find_by_route(tb, route);
149         if (xd) {
150                 tb_xdomain_put(xd);
151                 return;
152         }
153
154         xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
155                               NULL);
156         if (xd) {
157                 tb_port_at(route, sw)->xdomain = xd;
158                 tb_xdomain_add(xd);
159         }
160 }
161
162 static void tb_scan_port(struct tb_port *port);
163
164 /**
165  * tb_scan_switch() - scan for and initialize downstream switches
166  */
167 static void tb_scan_switch(struct tb_switch *sw)
168 {
169         struct tb_port *port;
170
171         tb_switch_for_each_port(sw, port)
172                 tb_scan_port(port);
173 }
174
175 /**
176  * tb_scan_port() - check for and initialize switches below port
177  */
178 static void tb_scan_port(struct tb_port *port)
179 {
180         struct tb_cm *tcm = tb_priv(port->sw->tb);
181         struct tb_port *upstream_port;
182         struct tb_switch *sw;
183
184         if (tb_is_upstream_port(port))
185                 return;
186
187         if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
188             !tb_dp_port_is_enabled(port)) {
189                 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
190                 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
191                                  false);
192                 return;
193         }
194
195         if (port->config.type != TB_TYPE_PORT)
196                 return;
197         if (port->dual_link_port && port->link_nr)
198                 return; /*
199                          * Downstream switch is reachable through two ports.
200                          * Only scan on the primary port (link_nr == 0).
201                          */
202         if (tb_wait_for_port(port, false) <= 0)
203                 return;
204         if (port->remote) {
205                 tb_port_dbg(port, "port already has a remote\n");
206                 return;
207         }
208         sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
209                              tb_downstream_route(port));
210         if (IS_ERR(sw)) {
211                 /*
212                  * If there is an error accessing the connected switch
213                  * it may be connected to another domain. Also we allow
214                  * the other domain to be connected to a max depth switch.
215                  */
216                 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
217                         tb_scan_xdomain(port);
218                 return;
219         }
220
221         if (tb_switch_configure(sw)) {
222                 tb_switch_put(sw);
223                 return;
224         }
225
226         /*
227          * If there was previously another domain connected remove it
228          * first.
229          */
230         if (port->xdomain) {
231                 tb_xdomain_remove(port->xdomain);
232                 port->xdomain = NULL;
233         }
234
235         /*
236          * Do not send uevents until we have discovered all existing
237          * tunnels and know which switches were authorized already by
238          * the boot firmware.
239          */
240         if (!tcm->hotplug_active)
241                 dev_set_uevent_suppress(&sw->dev, true);
242
243         if (tb_switch_add(sw)) {
244                 tb_switch_put(sw);
245                 return;
246         }
247
248         /* Link the switches using both links if available */
249         upstream_port = tb_upstream_port(sw);
250         port->remote = upstream_port;
251         upstream_port->remote = port;
252         if (port->dual_link_port && upstream_port->dual_link_port) {
253                 port->dual_link_port->remote = upstream_port->dual_link_port;
254                 upstream_port->dual_link_port->remote = port->dual_link_port;
255         }
256
257         /* Enable lane bonding if supported */
258         if (tb_switch_lane_bonding_enable(sw))
259                 tb_sw_warn(sw, "failed to enable lane bonding\n");
260
261         tb_scan_switch(sw);
262 }
263
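/*
 * Return the first tunnel of @type from the tunnel list whose source
 * port matches @src_port or whose destination port matches @dst_port.
 * Either port may be NULL in which case it is ignored. Returns NULL if
 * no such tunnel exists.
 */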
264 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
265                                         struct tb_port *src_port,
266                                         struct tb_port *dst_port)
267 {
268         struct tb_cm *tcm = tb_priv(tb);
269         struct tb_tunnel *tunnel;
270
271         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
272                 if (tunnel->type == type &&
273                     ((src_port && src_port == tunnel->src_port) ||
274                      (dst_port && dst_port == tunnel->dst_port))) {
275                         return tunnel;
276                 }
277         }
278
279         return NULL;
280 }
281
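/*
 * Deactivate @tunnel, remove it from the tunnel list and free it. For
 * DP tunnels the DP IN resource claimed when the tunnel was set up is
 * released as well. Passing NULL is a no-op.
 */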
282 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
283 {
284         if (!tunnel)
285                 return;
286
287         tb_tunnel_deactivate(tunnel);
288         list_del(&tunnel->list);
289
290         /*
291          * In case of DP tunnel make sure the DP IN resource is deallocated
292          * properly.
293          */
294         if (tb_tunnel_is_dp(tunnel)) {
295                 struct tb_port *in = tunnel->src_port;
296
297                 tb_switch_dealloc_dp_resource(in->sw, in);
298         }
299
300         tb_tunnel_free(tunnel);
301 }
302
303 /**
304  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
305  */
306 static void tb_free_invalid_tunnels(struct tb *tb)
307 {
308         struct tb_cm *tcm = tb_priv(tb);
309         struct tb_tunnel *tunnel;
310         struct tb_tunnel *n;
311
312         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
313                 if (tb_tunnel_is_invalid(tunnel))
314                         tb_deactivate_and_free_tunnel(tunnel);
315         }
316 }
317
318 /**
319  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
320  */
321 static void tb_free_unplugged_children(struct tb_switch *sw)
322 {
323         struct tb_port *port;
324
325         tb_switch_for_each_port(sw, port) {
326                 if (!tb_port_has_remote(port))
327                         continue;
328
329                 if (port->remote->sw->is_unplugged) {
330                         tb_remove_dp_resources(port->remote->sw);
331                         tb_switch_lane_bonding_disable(port->remote->sw);
332                         tb_switch_remove(port->remote->sw);
333                         port->remote = NULL;
334                         if (port->dual_link_port)
335                                 port->dual_link_port->remote = NULL;
336                 } else {
337                         tb_free_unplugged_children(port->remote->sw);
338                 }
339         }
340 }
341
342 /**
343  * tb_find_port() - return the first port of @type on @sw or NULL
344  * @sw: Switch to find the port from
345  * @type: Port type to look for
346  */
347 static struct tb_port *tb_find_port(struct tb_switch *sw,
348                                     enum tb_port_type type)
349 {
350         struct tb_port *port;
351
352         tb_switch_for_each_port(sw, port) {
353                 if (port->config.type == type)
354                         return port;
355         }
356
357         return NULL;
358 }
359
360 /**
361  * tb_find_unused_port() - return the first inactive port on @sw
362  * @sw: Switch to find the port on
363  * @type: Port type to look for
364  */
365 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
366                                            enum tb_port_type type)
367 {
368         struct tb_port *port;
369
370         tb_switch_for_each_port(sw, port) {
371                 if (tb_is_upstream_port(port))
372                         continue;
373                 if (port->config.type != type)
374                         continue;
375                 if (port->cap_adap)
376                         continue;
377                 if (tb_port_is_enabled(port))
378                         continue;
379                 return port;
380         }
381         return NULL;
382 }
383
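/*
 * Return the PCIe downstream adapter that should be used to tunnel
 * PCIe towards the device behind @port. On the root switch the mapping
 * from physical port to PCIe down adapter is hard-coded per controller
 * family; on other switches the first unused PCIe down adapter is
 * picked.
 */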
384 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
385                                          const struct tb_port *port)
386 {
387         /*
388          * To keep plugging devices consistently in the same PCIe
389          * hierarchy, do mapping here for root switch downstream PCIe
390          * ports.
391          */
392         if (!tb_route(sw)) {
393                 int phy_port = tb_phy_port_from_link(port->port);
394                 int index;
395
396                 /*
397                  * Hard-coded Thunderbolt port to PCIe down port mapping
398                  * per controller.
399                  */
400                 if (tb_switch_is_cactus_ridge(sw) ||
401                     tb_switch_is_alpine_ridge(sw))
402                         index = !phy_port ? 6 : 7;
403                 else if (tb_switch_is_falcon_ridge(sw))
404                         index = !phy_port ? 6 : 8;
405                 else if (tb_switch_is_titan_ridge(sw))
406                         index = !phy_port ? 8 : 9;
407                 else
408                         goto out;
409
410                 /* Validate the hard-coding */
411                 if (WARN_ON(index > sw->config.max_port_number))
412                         goto out;
413                 if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
414                         goto out;
415                 if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
416                         goto out;
417
418                 return &sw->ports[index];
419         }
420
421 out:
422         return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
423 }
424
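/*
 * Estimate the bandwidth (in Mb/s) still available for a new DP tunnel
 * between @in and @out. Walk from the switch of @out up towards the
 * switch of @in, take each link's bandwidth minus a 10% guard band,
 * subtract what the DP tunnels already crossing that switch consume,
 * and return the minimum over the path. The result is capped at
 * 40000 Mb/s, presumably the 40 Gb/s Thunderbolt 3 link maximum.
 */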
425 static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
426                            struct tb_port *out)
427 {
428         struct tb_switch *sw = out->sw;
429         struct tb_tunnel *tunnel;
430         int bw, available_bw = 40000;
431
432         while (sw && sw != in->sw) {
433                 bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
434                 /* Leave 10% guard band */
435                 bw -= bw / 10;
436
437                 /*
438                  * Check for any active DP tunnels that go through this
439                  * switch and reduce their consumed bandwidth from
440                  * available.
441                  */
442                 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
443                         int consumed_bw;
444
445                         if (!tb_tunnel_switch_on_path(tunnel, sw))
446                                 continue;
447
448                         consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
449                         if (consumed_bw < 0)
450                                 return consumed_bw;
451
452                         bw -= consumed_bw;
453                 }
454
455                 if (bw < available_bw)
456                         available_bw = bw;
457
458                 sw = tb_switch_parent(sw);
459         }
460
461         return available_bw;
462 }
463
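/*
 * Pick the first inactive DP IN and DP OUT adapters from the
 * dp_resources list, claim the DP IN resource, work out how much
 * bandwidth is available for the new tunnel and then establish a DP
 * tunnel between the two adapters. If allocation or activation of the
 * tunnel fails the DP IN resource is released again.
 */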
464 static void tb_tunnel_dp(struct tb *tb)
465 {
466         struct tb_cm *tcm = tb_priv(tb);
467         struct tb_port *port, *in, *out;
468         struct tb_tunnel *tunnel;
469         int available_bw;
470
471         /*
472          * Find pair of inactive DP IN and DP OUT adapters and then
473          * establish a DP tunnel between them.
474          */
475         tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
476
477         in = NULL;
478         out = NULL;
479         list_for_each_entry(port, &tcm->dp_resources, list) {
480                 if (tb_port_is_enabled(port)) {
481                         tb_port_dbg(port, "in use\n");
482                         continue;
483                 }
484
485                 tb_port_dbg(port, "available\n");
486
487                 if (!in && tb_port_is_dpin(port))
488                         in = port;
489                 else if (!out && tb_port_is_dpout(port))
490                         out = port;
491         }
492
493         if (!in) {
494                 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
495                 return;
496         }
497         if (!out) {
498                 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
499                 return;
500         }
501
502         if (tb_switch_alloc_dp_resource(in->sw, in)) {
503                 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
504                 return;
505         }
506
507         /* Calculate available bandwidth between in and out */
508         available_bw = tb_available_bw(tcm, in, out);
509         if (available_bw < 0) {
510                 tb_warn(tb, "failed to determine available bandwidth\n");
511                 goto dealloc_dp;
512         }
513
514         tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
515                available_bw);
516
517         tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
518         if (!tunnel) {
519                 tb_port_dbg(out, "could not allocate DP tunnel\n");
520                 goto dealloc_dp;
521         }
522
523         if (tb_tunnel_activate(tunnel)) {
524                 tb_port_info(out, "DP tunnel activation failed, aborting\n");
525                 tb_tunnel_free(tunnel);
526                 goto dealloc_dp;
527         }
528
529         list_add_tail(&tunnel->list, &tcm->tunnel_list);
530         return;
531
532 dealloc_dp:
533         tb_switch_dealloc_dp_resource(in->sw, in);
534 }
535
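/*
 * A DP adapter (IN or OUT) has gone away. Tear down the DP tunnel that
 * was using it, drop the adapter from the dp_resources list and then
 * see if a new tunnel can be set up with the remaining resources.
 */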
536 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
537 {
538         struct tb_port *in, *out;
539         struct tb_tunnel *tunnel;
540
541         if (tb_port_is_dpin(port)) {
542                 tb_port_dbg(port, "DP IN resource unavailable\n");
543                 in = port;
544                 out = NULL;
545         } else {
546                 tb_port_dbg(port, "DP OUT resource unavailable\n");
547                 in = NULL;
548                 out = port;
549         }
550
551         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
552         tb_deactivate_and_free_tunnel(tunnel);
553         list_del_init(&port->list);
554
555         /*
556          * See if there is another DP OUT port that can be used to
557          * create another tunnel.
558          */
559         tb_tunnel_dp(tb);
560 }
561
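/*
 * A hot plug event reported a DP adapter. Unless the adapter is already
 * enabled or already tracked, add it to the dp_resources list and try
 * to establish a new DP tunnel.
 */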
562 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
563 {
564         struct tb_cm *tcm = tb_priv(tb);
565         struct tb_port *p;
566
567         if (tb_port_is_enabled(port))
568                 return;
569
570         list_for_each_entry(p, &tcm->dp_resources, list) {
571                 if (p == port)
572                         return;
573         }
574
575         tb_port_dbg(port, "DP %s resource available\n",
576                     tb_port_is_dpin(port) ? "IN" : "OUT");
577         list_add_tail(&port->list, &tcm->dp_resources);
578
579         /* Look for suitable DP IN <-> DP OUT pairs now */
580         tb_tunnel_dp(tb);
581 }
582
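/*
 * Create a PCIe tunnel from the parent switch's PCIe downstream adapter
 * to the PCIe upstream adapter of @sw. Used as the ->approve_switch
 * callback, so this runs when the switch is authorized. Returns 0 also
 * when there is nothing to tunnel (no PCIe up or down adapter found).
 */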
583 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
584 {
585         struct tb_port *up, *down, *port;
586         struct tb_cm *tcm = tb_priv(tb);
587         struct tb_switch *parent_sw;
588         struct tb_tunnel *tunnel;
589
590         up = tb_find_port(sw, TB_TYPE_PCIE_UP);
591         if (!up)
592                 return 0;
593
594         /*
595          * Look up available down port. Since we are chaining it should
596          * be found right above this switch.
597          */
598         parent_sw = tb_to_switch(sw->dev.parent);
599         port = tb_port_at(tb_route(sw), parent_sw);
600         down = tb_find_pcie_down(parent_sw, port);
601         if (!down)
602                 return 0;
603
604         tunnel = tb_tunnel_alloc_pci(tb, up, down);
605         if (!tunnel)
606                 return -ENOMEM;
607
608         if (tb_tunnel_activate(tunnel)) {
609                 tb_port_info(up,
610                              "PCIe tunnel activation failed, aborting\n");
611                 tb_tunnel_free(tunnel);
612                 return -EIO;
613         }
614
615         list_add_tail(&tunnel->list, &tcm->tunnel_list);
616         return 0;
617 }
618
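/*
 * Establish a DMA tunnel between the NHI port of the host and the port
 * leading to the XDomain @xd, using the transmit/receive rings and
 * paths negotiated with the remote host.
 */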
619 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
620 {
621         struct tb_cm *tcm = tb_priv(tb);
622         struct tb_port *nhi_port, *dst_port;
623         struct tb_tunnel *tunnel;
624         struct tb_switch *sw;
625
626         sw = tb_to_switch(xd->dev.parent);
627         dst_port = tb_port_at(xd->route, sw);
628         nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
629
630         mutex_lock(&tb->lock);
631         tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
632                                      xd->transmit_path, xd->receive_ring,
633                                      xd->receive_path);
634         if (!tunnel) {
635                 mutex_unlock(&tb->lock);
636                 return -ENOMEM;
637         }
638
639         if (tb_tunnel_activate(tunnel)) {
640                 tb_port_info(nhi_port,
641                              "DMA tunnel activation failed, aborting\n");
642                 tb_tunnel_free(tunnel);
643                 mutex_unlock(&tb->lock);
644                 return -EIO;
645         }
646
647         list_add_tail(&tunnel->list, &tcm->tunnel_list);
648         mutex_unlock(&tb->lock);
649         return 0;
650 }
651
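/*
 * Tear down the DMA tunnel towards XDomain @xd if it still exists. The
 * caller is expected to hold tb->lock.
 */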
652 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
653 {
654         struct tb_port *dst_port;
655         struct tb_tunnel *tunnel;
656         struct tb_switch *sw;
657
658         sw = tb_to_switch(xd->dev.parent);
659         dst_port = tb_port_at(xd->route, sw);
660
661         /*
662          * It is possible that the tunnel was already torn down (in
663          * case of cable disconnect) so it is fine if we cannot find it
664          * here anymore.
665          */
666         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
667         tb_deactivate_and_free_tunnel(tunnel);
668 }
669
670 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
671 {
672         if (!xd->is_unplugged) {
673                 mutex_lock(&tb->lock);
674                 __tb_disconnect_xdomain_paths(tb, xd);
675                 mutex_unlock(&tb->lock);
676         }
677         return 0;
678 }
679
680 /* hotplug handling */
681
682 /**
683  * tb_handle_hotplug() - handle hotplug event
684  *
685  * Executes on tb->wq.
686  */
687 static void tb_handle_hotplug(struct work_struct *work)
688 {
689         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
690         struct tb *tb = ev->tb;
691         struct tb_cm *tcm = tb_priv(tb);
692         struct tb_switch *sw;
693         struct tb_port *port;
694         mutex_lock(&tb->lock);
695         if (!tcm->hotplug_active)
696                 goto out; /* during init, suspend or shutdown */
697
698         sw = tb_switch_find_by_route(tb, ev->route);
699         if (!sw) {
700                 tb_warn(tb,
701                         "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
702                         ev->route, ev->port, ev->unplug);
703                 goto out;
704         }
705         if (ev->port > sw->config.max_port_number) {
706                 tb_warn(tb,
707                         "hotplug event from non existent port %llx:%x (unplug: %d)\n",
708                         ev->route, ev->port, ev->unplug);
709                 goto put_sw;
710         }
711         port = &sw->ports[ev->port];
712         if (tb_is_upstream_port(port)) {
713                 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
714                        ev->route, ev->port, ev->unplug);
715                 goto put_sw;
716         }
717         if (ev->unplug) {
718                 if (tb_port_has_remote(port)) {
719                         tb_port_dbg(port, "switch unplugged\n");
720                         tb_sw_set_unplugged(port->remote->sw);
721                         tb_free_invalid_tunnels(tb);
722                         tb_remove_dp_resources(port->remote->sw);
723                         tb_switch_lane_bonding_disable(port->remote->sw);
724                         tb_switch_remove(port->remote->sw);
725                         port->remote = NULL;
726                         if (port->dual_link_port)
727                                 port->dual_link_port->remote = NULL;
728                         /* Maybe we can create another DP tunnel */
729                         tb_tunnel_dp(tb);
730                 } else if (port->xdomain) {
731                         struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
732
733                         tb_port_dbg(port, "xdomain unplugged\n");
734                         /*
735                          * Service drivers are unbound during
736                          * tb_xdomain_remove() so setting XDomain as
737                          * unplugged here prevents deadlock if they call
738                          * tb_xdomain_disable_paths(). We will tear down
739                          * the path below.
740                          */
741                         xd->is_unplugged = true;
742                         tb_xdomain_remove(xd);
743                         port->xdomain = NULL;
744                         __tb_disconnect_xdomain_paths(tb, xd);
745                         tb_xdomain_put(xd);
746                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
747                         tb_dp_resource_unavailable(tb, port);
748                 } else {
749                         tb_port_dbg(port,
750                                    "got unplug event for disconnected port, ignoring\n");
751                 }
752         } else if (port->remote) {
753                 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
754         } else {
755                 if (tb_port_is_null(port)) {
756                         tb_port_dbg(port, "hotplug: scanning\n");
757                         tb_scan_port(port);
758                         if (!port->remote)
759                                 tb_port_dbg(port, "hotplug: no switch found\n");
760                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
761                         tb_dp_resource_available(tb, port);
762                 }
763         }
764
765 put_sw:
766         tb_switch_put(sw);
767 out:
768         mutex_unlock(&tb->lock);
769         kfree(ev);
770 }
771
772 /**
773  * tb_handle_event() - callback function for the control channel
774  *
775  * Delegates to tb_handle_hotplug.
776  */
777 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
778                             const void *buf, size_t size)
779 {
780         const struct cfg_event_pkg *pkg = buf;
781         u64 route;
782
783         if (type != TB_CFG_PKG_EVENT) {
784                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
785                 return;
786         }
787
788         route = tb_cfg_get_route(&pkg->header);
789
790         if (tb_cfg_error(tb->ctl, route, pkg->port,
791                          TB_CFG_ERROR_ACK_PLUG_EVENT)) {
792                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
793                         pkg->port);
794         }
795
796         tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
797 }
798
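/*
 * Stop the connection manager: deactivate DMA tunnels (they need the
 * driver to be functional), free all tunnel structures, remove the
 * switch tree and stop accepting hot plug events.
 */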
799 static void tb_stop(struct tb *tb)
800 {
801         struct tb_cm *tcm = tb_priv(tb);
802         struct tb_tunnel *tunnel;
803         struct tb_tunnel *n;
804
805         /* tunnels are only present after everything has been initialized */
806         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
807                 /*
808                  * DMA tunnels require the driver to be functional so we
809                  * tear them down. Other protocol tunnels can be left
810                  * intact.
811                  */
812                 if (tb_tunnel_is_dma(tunnel))
813                         tb_tunnel_deactivate(tunnel);
814                 tb_tunnel_free(tunnel);
815         }
816         tb_switch_remove(tb->root_switch);
817         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
818 }
819
820 static int tb_scan_finalize_switch(struct device *dev, void *data)
821 {
822         if (tb_is_switch(dev)) {
823                 struct tb_switch *sw = tb_to_switch(dev);
824
825                 /*
826                  * If we found that the switch was already setup by the
827                  * boot firmware, mark it as authorized now before we
828                  * send uevent to userspace.
829                  */
830                 if (sw->boot)
831                         sw->authorized = 1;
832
833                 dev_set_uevent_suppress(dev, false);
834                 kobject_uevent(&dev->kobj, KOBJ_ADD);
835                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
836         }
837
838         return 0;
839 }
840
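/*
 * Start the connection manager: allocate and configure the root switch,
 * scan the existing topology, discover tunnels set up by the boot
 * firmware, announce the discovered switches to userspace and finally
 * start accepting hot plug events.
 */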
841 static int tb_start(struct tb *tb)
842 {
843         struct tb_cm *tcm = tb_priv(tb);
844         int ret;
845
846         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
847         if (IS_ERR(tb->root_switch))
848                 return PTR_ERR(tb->root_switch);
849
850         /*
851          * ICM firmware upgrade needs the ICM firmware to be running,
852          * which is not available in native mode, so disable firmware
853          * upgrade of the root switch.
854          */
855         tb->root_switch->no_nvm_upgrade = true;
856
857         ret = tb_switch_configure(tb->root_switch);
858         if (ret) {
859                 tb_switch_put(tb->root_switch);
860                 return ret;
861         }
862
863         /* Announce the switch to the world */
864         ret = tb_switch_add(tb->root_switch);
865         if (ret) {
866                 tb_switch_put(tb->root_switch);
867                 return ret;
868         }
869
870         /* Full scan to discover devices added before the driver was loaded. */
871         tb_scan_switch(tb->root_switch);
872         /* Find out tunnels created by the boot firmware */
873         tb_discover_tunnels(tb->root_switch);
874         /* Add DP IN resources for the root switch */
875         tb_add_dp_resources(tb->root_switch);
876         /* Make the discovered switches available to the userspace */
877         device_for_each_child(&tb->root_switch->dev, NULL,
878                               tb_scan_finalize_switch);
879
880         /* Allow tb_handle_hotplug to progress events */
881         tcm->hotplug_active = true;
882         return 0;
883 }
884
885 static int tb_suspend_noirq(struct tb *tb)
886 {
887         struct tb_cm *tcm = tb_priv(tb);
888
889         tb_dbg(tb, "suspending...\n");
890         tb_switch_suspend(tb->root_switch);
891         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
892         tb_dbg(tb, "suspend finished\n");
893
894         return 0;
895 }
896
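/*
 * Called during resume to re-enable lane bonding for every switch below
 * @sw before the tunnels are restarted.
 */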
897 static void tb_restore_children(struct tb_switch *sw)
898 {
899         struct tb_port *port;
900
901         tb_switch_for_each_port(sw, port) {
902                 if (!tb_port_has_remote(port))
903                         continue;
904
905                 if (tb_switch_lane_bonding_enable(port->remote->sw))
906                         dev_warn(&sw->dev, "failed to restore lane bonding\n");
907
908                 tb_restore_children(port->remote->sw);
909         }
910 }
911
912 static int tb_resume_noirq(struct tb *tb)
913 {
914         struct tb_cm *tcm = tb_priv(tb);
915         struct tb_tunnel *tunnel, *n;
916
917         tb_dbg(tb, "resuming...\n");
918
919         /* Remove any PCIe devices the firmware might have set up */
920         tb_switch_reset(tb, 0);
921
922         tb_switch_resume(tb->root_switch);
923         tb_free_invalid_tunnels(tb);
924         tb_free_unplugged_children(tb->root_switch);
925         tb_restore_children(tb->root_switch);
926         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
927                 tb_tunnel_restart(tunnel);
928         if (!list_empty(&tcm->tunnel_list)) {
929                 /*
930                  * the pcie links need some time to get going.
931                  * 100ms works for me...
932                  */
933                 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
934                 msleep(100);
935         }
936          /* Allow tb_handle_hotplug to progress events */
937         tcm->hotplug_active = true;
938         tb_dbg(tb, "resume finished\n");
939
940         return 0;
941 }
942
943 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
944 {
945         struct tb_port *port;
946         int ret = 0;
947
948         tb_switch_for_each_port(sw, port) {
949                 if (tb_is_upstream_port(port))
950                         continue;
951                 if (port->xdomain && port->xdomain->is_unplugged) {
952                         tb_xdomain_remove(port->xdomain);
953                         port->xdomain = NULL;
954                         ret++;
955                 } else if (port->remote) {
956                         ret += tb_free_unplugged_xdomains(port->remote->sw);
957                 }
958         }
959
960         return ret;
961 }
962
963 static void tb_complete(struct tb *tb)
964 {
965         /*
966          * Release any unplugged XDomains and if there is a case where
967          * another domain is swapped in place of unplugged XDomain we
968          * need to run another rescan.
969          */
970         mutex_lock(&tb->lock);
971         if (tb_free_unplugged_xdomains(tb->root_switch))
972                 tb_scan_switch(tb->root_switch);
973         mutex_unlock(&tb->lock);
974 }
975
976 static const struct tb_cm_ops tb_cm_ops = {
977         .start = tb_start,
978         .stop = tb_stop,
979         .suspend_noirq = tb_suspend_noirq,
980         .resume_noirq = tb_resume_noirq,
981         .complete = tb_complete,
982         .handle_event = tb_handle_event,
983         .approve_switch = tb_tunnel_pci,
984         .approve_xdomain_paths = tb_approve_xdomain_paths,
985         .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
986 };
987
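/*
 * tb_probe() - Probe for domains handled by this connection manager
 *
 * This software connection manager is only used on Apple hardware
 * (x86_apple_machine); for other systems NULL is returned and another
 * connection manager (typically the firmware based ICM) takes over.
 * The returned domain defaults to the "user" security level.
 */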
988 struct tb *tb_probe(struct tb_nhi *nhi)
989 {
990         struct tb_cm *tcm;
991         struct tb *tb;
992
993         if (!x86_apple_machine)
994                 return NULL;
995
996         tb = tb_domain_alloc(nhi, sizeof(*tcm));
997         if (!tb)
998                 return NULL;
999
1000         tb->security_level = TB_SECURITY_USER;
1001         tb->cm_ops = &tb_cm_ops;
1002
1003         tcm = tb_priv(tb);
1004         INIT_LIST_HEAD(&tcm->tunnel_list);
1005         INIT_LIST_HEAD(&tcm->dp_resources);
1006
1007         return tb;
1008 }