thunderbolt: Add Display Port adapter pairing and resource management
drivers/thunderbolt/tb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/platform_data/x86/apple.h>
13
14 #include "tb.h"
15 #include "tb_regs.h"
16 #include "tunnel.h"
17
18 /**
19  * struct tb_cm - Simple Thunderbolt connection manager
20  * @tunnel_list: List of active tunnels
21  * @dp_resources: List of available DP resources for DP tunneling
22  * @hotplug_active: tb_handle_hotplug will stop progressing plug
23  *                  events and exit if this is not set (it needs to
24  *                  acquire the lock one more time). Used to drain wq
25  *                  after cfg has been paused.
26  */
27 struct tb_cm {
28         struct list_head tunnel_list;
29         struct list_head dp_resources;
30         bool hotplug_active;
31 };
32
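The @hotplug_active flag is the quiescing mechanism for this connection manager: tb_handle_hotplug() re-checks it under tb->lock before touching anything, so clearing it under the lock and then flushing tb->wq drains all pending events. A minimal sketch of that pattern, as a hypothetical helper that is not part of this file (it assumes only tb->lock, tb->wq, tb_priv() and the standard flush_workqueue() API):

	/* Hypothetical helper illustrating the drain pattern described above. */
	static void tb_cm_quiesce(struct tb *tb)
	{
		struct tb_cm *tcm = tb_priv(tb);

		mutex_lock(&tb->lock);
		tcm->hotplug_active = false;	/* queued events will bail out early */
		mutex_unlock(&tb->lock);

		/*
		 * Events already on tb->wq re-take tb->lock, see the cleared
		 * flag and exit without touching the (paused) config space.
		 */
		flush_workqueue(tb->wq);
	}

In this file the flag is cleared with the same intent in tb_stop() and tb_suspend_noirq() below.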
33 struct tb_hotplug_event {
34         struct work_struct work;
35         struct tb *tb;
36         u64 route;
37         u8 port;
38         bool unplug;
39 };
40
41 static void tb_handle_hotplug(struct work_struct *work);
42
43 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
44 {
45         struct tb_hotplug_event *ev;
46
47         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
48         if (!ev)
49                 return;
50
51         ev->tb = tb;
52         ev->route = route;
53         ev->port = port;
54         ev->unplug = unplug;
55         INIT_WORK(&ev->work, tb_handle_hotplug);
56         queue_work(tb->wq, &ev->work);
57 }
58
59 /* enumeration & hot plug handling */
60
61 static void tb_add_dp_resources(struct tb_switch *sw)
62 {
63         struct tb_cm *tcm = tb_priv(sw->tb);
64         struct tb_port *port;
65
66         tb_switch_for_each_port(sw, port) {
67                 if (!tb_port_is_dpin(port))
68                         continue;
69
70                 if (!tb_switch_query_dp_resource(sw, port))
71                         continue;
72
73                 list_add_tail(&port->list, &tcm->dp_resources);
74                 tb_port_dbg(port, "DP IN resource available\n");
75         }
76 }
77
78 static void tb_remove_dp_resources(struct tb_switch *sw)
79 {
80         struct tb_cm *tcm = tb_priv(sw->tb);
81         struct tb_port *port, *tmp;
82
83         /* Clear children resources first */
84         tb_switch_for_each_port(sw, port) {
85                 if (tb_port_has_remote(port))
86                         tb_remove_dp_resources(port->remote->sw);
87         }
88
89         list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
90                 if (port->sw == sw) {
91                         tb_port_dbg(port, "DP IN resource unavailable\n");
92                         list_del_init(&port->list);
93                 }
94         }
95 }
96
97 static void tb_discover_tunnels(struct tb_switch *sw)
98 {
99         struct tb *tb = sw->tb;
100         struct tb_cm *tcm = tb_priv(tb);
101         struct tb_port *port;
102
103         tb_switch_for_each_port(sw, port) {
104                 struct tb_tunnel *tunnel = NULL;
105
106                 switch (port->config.type) {
107                 case TB_TYPE_DP_HDMI_IN:
108                         tunnel = tb_tunnel_discover_dp(tb, port);
109                         break;
110
111                 case TB_TYPE_PCIE_DOWN:
112                         tunnel = tb_tunnel_discover_pci(tb, port);
113                         break;
114
115                 default:
116                         break;
117                 }
118
119                 if (!tunnel)
120                         continue;
121
122                 if (tb_tunnel_is_pci(tunnel)) {
123                         struct tb_switch *parent = tunnel->dst_port->sw;
124
125                         while (parent != tunnel->src_port->sw) {
126                                 parent->boot = true;
127                                 parent = tb_switch_parent(parent);
128                         }
129                 }
130
131                 list_add_tail(&tunnel->list, &tcm->tunnel_list);
132         }
133
134         tb_switch_for_each_port(sw, port) {
135                 if (tb_port_has_remote(port))
136                         tb_discover_tunnels(port->remote->sw);
137         }
138 }
139
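/*
 * Check if the link downstream of @port leads to another Thunderbolt
 * domain (host). If no XDomain is registered for that route yet,
 * allocate one and announce it.
 */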
140 static void tb_scan_xdomain(struct tb_port *port)
141 {
142         struct tb_switch *sw = port->sw;
143         struct tb *tb = sw->tb;
144         struct tb_xdomain *xd;
145         u64 route;
146
147         route = tb_downstream_route(port);
148         xd = tb_xdomain_find_by_route(tb, route);
149         if (xd) {
150                 tb_xdomain_put(xd);
151                 return;
152         }
153
154         xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
155                               NULL);
156         if (xd) {
157                 tb_port_at(route, sw)->xdomain = xd;
158                 tb_xdomain_add(xd);
159         }
160 }
161
162 static void tb_scan_port(struct tb_port *port);
163
164 /**
165  * tb_scan_switch() - scan for and initialize downstream switches
166  */
167 static void tb_scan_switch(struct tb_switch *sw)
168 {
169         struct tb_port *port;
170
171         tb_switch_for_each_port(sw, port)
172                 tb_scan_port(port);
173 }
174
175 /**
176  * tb_scan_port() - check for and initialize switches below port
177  */
178 static void tb_scan_port(struct tb_port *port)
179 {
180         struct tb_cm *tcm = tb_priv(port->sw->tb);
181         struct tb_port *upstream_port;
182         struct tb_switch *sw;
183
184         if (tb_is_upstream_port(port))
185                 return;
186
187         if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
188             !tb_dp_port_is_enabled(port)) {
189                 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
190                 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
191                                  false);
192                 return;
193         }
194
195         if (port->config.type != TB_TYPE_PORT)
196                 return;
197         if (port->dual_link_port && port->link_nr)
198                 return; /*
199                          * Downstream switch is reachable through two ports.
200                          * Only scan on the primary port (link_nr == 0).
201                          */
202         if (tb_wait_for_port(port, false) <= 0)
203                 return;
204         if (port->remote) {
205                 tb_port_dbg(port, "port already has a remote\n");
206                 return;
207         }
208         sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
209                              tb_downstream_route(port));
210         if (IS_ERR(sw)) {
211                 /*
212                  * If there is an error accessing the connected switch
213                  * it may be connected to another domain. Also we allow
214                  * the other domain to be connected to a max depth switch.
215                  */
216                 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
217                         tb_scan_xdomain(port);
218                 return;
219         }
220
221         if (tb_switch_configure(sw)) {
222                 tb_switch_put(sw);
223                 return;
224         }
225
226         /*
227          * If there was previously another domain connected remove it
228          * first.
229          */
230         if (port->xdomain) {
231                 tb_xdomain_remove(port->xdomain);
232                 port->xdomain = NULL;
233         }
234
235         /*
236          * Do not send uevents until we have discovered all existing
237          * tunnels and know which switches were authorized already by
238          * the boot firmware.
239          */
240         if (!tcm->hotplug_active)
241                 dev_set_uevent_suppress(&sw->dev, true);
242
243         if (tb_switch_add(sw)) {
244                 tb_switch_put(sw);
245                 return;
246         }
247
248         /* Link the switches using both links if available */
249         upstream_port = tb_upstream_port(sw);
250         port->remote = upstream_port;
251         upstream_port->remote = port;
252         if (port->dual_link_port && upstream_port->dual_link_port) {
253                 port->dual_link_port->remote = upstream_port->dual_link_port;
254                 upstream_port->dual_link_port->remote = port->dual_link_port;
255         }
256
257         /* Enable lane bonding if supported */
258         if (tb_switch_lane_bonding_enable(sw))
259                 tb_sw_warn(sw, "failed to enable lane bonding\n");
260
261         tb_scan_switch(sw);
262 }
263
264 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
265                                         struct tb_port *src_port,
266                                         struct tb_port *dst_port)
267 {
268         struct tb_cm *tcm = tb_priv(tb);
269         struct tb_tunnel *tunnel;
270
271         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
272                 if (tunnel->type == type &&
273                     ((src_port && src_port == tunnel->src_port) ||
274                      (dst_port && dst_port == tunnel->dst_port))) {
275                         return tunnel;
276                 }
277         }
278
279         return NULL;
280 }
281
282 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
283 {
284         if (!tunnel)
285                 return;
286
287         tb_tunnel_deactivate(tunnel);
288         list_del(&tunnel->list);
289
290         /*
291          * In case of DP tunnel make sure the DP IN resource is deallocated
292          * properly.
293          */
294         if (tb_tunnel_is_dp(tunnel)) {
295                 struct tb_port *in = tunnel->src_port;
296
297                 tb_switch_dealloc_dp_resource(in->sw, in);
298         }
299
300         tb_tunnel_free(tunnel);
301 }
302
303 /**
304  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
305  */
306 static void tb_free_invalid_tunnels(struct tb *tb)
307 {
308         struct tb_cm *tcm = tb_priv(tb);
309         struct tb_tunnel *tunnel;
310         struct tb_tunnel *n;
311
312         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
313                 if (tb_tunnel_is_invalid(tunnel))
314                         tb_deactivate_and_free_tunnel(tunnel);
315         }
316 }
317
318 /**
319  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
320  */
321 static void tb_free_unplugged_children(struct tb_switch *sw)
322 {
323         struct tb_port *port;
324
325         tb_switch_for_each_port(sw, port) {
326                 if (!tb_port_has_remote(port))
327                         continue;
328
329                 if (port->remote->sw->is_unplugged) {
330                         tb_remove_dp_resources(port->remote->sw);
331                         tb_switch_lane_bonding_disable(port->remote->sw);
332                         tb_switch_remove(port->remote->sw);
333                         port->remote = NULL;
334                         if (port->dual_link_port)
335                                 port->dual_link_port->remote = NULL;
336                 } else {
337                         tb_free_unplugged_children(port->remote->sw);
338                 }
339         }
340 }
341
342 /**
343  * tb_find_port() - return the first port of @type on @sw or NULL
344  * @sw: Switch to find the port from
345  * @type: Port type to look for
346  */
347 static struct tb_port *tb_find_port(struct tb_switch *sw,
348                                     enum tb_port_type type)
349 {
350         struct tb_port *port;
351
352         tb_switch_for_each_port(sw, port) {
353                 if (port->config.type == type)
354                         return port;
355         }
356
357         return NULL;
358 }
359
360 /**
361  * tb_find_unused_port() - return the first inactive port on @sw
362  * @sw: Switch to find the port on
363  * @type: Port type to look for
364  */
365 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
366                                            enum tb_port_type type)
367 {
368         struct tb_port *port;
369
370         tb_switch_for_each_port(sw, port) {
371                 if (tb_is_upstream_port(port))
372                         continue;
373                 if (port->config.type != type)
374                         continue;
375                 if (port->cap_adap)
376                         continue;
377                 if (tb_port_is_enabled(port))
378                         continue;
379                 return port;
380         }
381         return NULL;
382 }
383
384 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
385                                          const struct tb_port *port)
386 {
387         /*
388          * To keep plugging devices consistently in the same PCIe
389          * hierarchy, do mapping here for root switch downstream PCIe
390          * ports.
391          */
392         if (!tb_route(sw)) {
393                 int phy_port = tb_phy_port_from_link(port->port);
394                 int index;
395
396                 /*
397                  * Hard-coded Thunderbolt port to PCIe down port mapping
398                  * per controller.
399                  */
400                 if (tb_switch_is_cactus_ridge(sw) ||
401                     tb_switch_is_alpine_ridge(sw))
402                         index = !phy_port ? 6 : 7;
403                 else if (tb_switch_is_falcon_ridge(sw))
404                         index = !phy_port ? 6 : 8;
405                 else if (tb_switch_is_titan_ridge(sw))
406                         index = !phy_port ? 8 : 9;
407                 else
408                         goto out;
409
410                 /* Validate the hard-coding */
411                 if (WARN_ON(index > sw->config.max_port_number))
412                         goto out;
413                 if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
414                         goto out;
415                 if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
416                         goto out;
417
418                 return &sw->ports[index];
419         }
420
421 out:
422         return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
423 }
424
425 static void tb_tunnel_dp(struct tb *tb)
426 {
427         struct tb_cm *tcm = tb_priv(tb);
428         struct tb_port *port, *in, *out;
429         struct tb_tunnel *tunnel;
430
431         /*
432          * Find pair of inactive DP IN and DP OUT adapters and then
433          * establish a DP tunnel between them.
434          */
435         tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
436
437         in = NULL;
438         out = NULL;
439         list_for_each_entry(port, &tcm->dp_resources, list) {
440                 if (tb_port_is_enabled(port)) {
441                         tb_port_dbg(port, "in use\n");
442                         continue;
443                 }
444
445                 tb_port_dbg(port, "available\n");
446
447                 if (!in && tb_port_is_dpin(port))
448                         in = port;
449                 else if (!out && tb_port_is_dpout(port))
450                         out = port;
451         }
452
453         if (!in) {
454                 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
455                 return;
456         }
457         if (!out) {
458                 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
459                 return;
460         }
461
462         if (tb_switch_alloc_dp_resource(in->sw, in)) {
463                 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
464                 return;
465         }
466
467         tunnel = tb_tunnel_alloc_dp(tb, in, out);
468         if (!tunnel) {
469                 tb_port_dbg(out, "could not allocate DP tunnel\n");
470                 goto dealloc_dp;
471         }
472
473         if (tb_tunnel_activate(tunnel)) {
474                 tb_port_info(out, "DP tunnel activation failed, aborting\n");
475                 tb_tunnel_free(tunnel);
476                 goto dealloc_dp;
477         }
478
479         list_add_tail(&tunnel->list, &tcm->tunnel_list);
480         return;
481
482 dealloc_dp:
483         tb_switch_dealloc_dp_resource(in->sw, in);
484 }
485
486 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
487 {
488         struct tb_port *in, *out;
489         struct tb_tunnel *tunnel;
490
491         if (tb_port_is_dpin(port)) {
492                 tb_port_dbg(port, "DP IN resource unavailable\n");
493                 in = port;
494                 out = NULL;
495         } else {
496                 tb_port_dbg(port, "DP OUT resource unavailable\n");
497                 in = NULL;
498                 out = port;
499         }
500
501         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
502         tb_deactivate_and_free_tunnel(tunnel);
503         list_del_init(&port->list);
504
505         /*
506          * See if there is another DP OUT port that can be used for
507          * to create another tunnel.
508          */
509         tb_tunnel_dp(tb);
510 }
511
512 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
513 {
514         struct tb_cm *tcm = tb_priv(tb);
515         struct tb_port *p;
516
517         if (tb_port_is_enabled(port))
518                 return;
519
520         list_for_each_entry(p, &tcm->dp_resources, list) {
521                 if (p == port)
522                         return;
523         }
524
525         tb_port_dbg(port, "DP %s resource available\n",
526                     tb_port_is_dpin(port) ? "IN" : "OUT");
527         list_add_tail(&port->list, &tcm->dp_resources);
528
529         /* Look for suitable DP IN <-> DP OUT pairs now */
530         tb_tunnel_dp(tb);
531 }
532
533 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
534 {
535         struct tb_port *up, *down, *port;
536         struct tb_cm *tcm = tb_priv(tb);
537         struct tb_switch *parent_sw;
538         struct tb_tunnel *tunnel;
539
540         up = tb_find_port(sw, TB_TYPE_PCIE_UP);
541         if (!up)
542                 return 0;
543
544         /*
545          * Look up available down port. Since we are chaining it should
546          * be found right above this switch.
547          */
548         parent_sw = tb_to_switch(sw->dev.parent);
549         port = tb_port_at(tb_route(sw), parent_sw);
550         down = tb_find_pcie_down(parent_sw, port);
551         if (!down)
552                 return 0;
553
554         tunnel = tb_tunnel_alloc_pci(tb, up, down);
555         if (!tunnel)
556                 return -ENOMEM;
557
558         if (tb_tunnel_activate(tunnel)) {
559                 tb_port_info(up,
560                              "PCIe tunnel activation failed, aborting\n");
561                 tb_tunnel_free(tunnel);
562                 return -EIO;
563         }
564
565         list_add_tail(&tunnel->list, &tcm->tunnel_list);
566         return 0;
567 }
568
569 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
570 {
571         struct tb_cm *tcm = tb_priv(tb);
572         struct tb_port *nhi_port, *dst_port;
573         struct tb_tunnel *tunnel;
574         struct tb_switch *sw;
575
576         sw = tb_to_switch(xd->dev.parent);
577         dst_port = tb_port_at(xd->route, sw);
578         nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
579
580         mutex_lock(&tb->lock);
581         tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
582                                      xd->transmit_path, xd->receive_ring,
583                                      xd->receive_path);
584         if (!tunnel) {
585                 mutex_unlock(&tb->lock);
586                 return -ENOMEM;
587         }
588
589         if (tb_tunnel_activate(tunnel)) {
590                 tb_port_info(nhi_port,
591                              "DMA tunnel activation failed, aborting\n");
592                 tb_tunnel_free(tunnel);
593                 mutex_unlock(&tb->lock);
594                 return -EIO;
595         }
596
597         list_add_tail(&tunnel->list, &tcm->tunnel_list);
598         mutex_unlock(&tb->lock);
599         return 0;
600 }
601
602 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
603 {
604         struct tb_port *dst_port;
605         struct tb_tunnel *tunnel;
606         struct tb_switch *sw;
607
608         sw = tb_to_switch(xd->dev.parent);
609         dst_port = tb_port_at(xd->route, sw);
610
611         /*
612          * It is possible that the tunnel was already torn down (in
613          * case of cable disconnect) so it is fine if we cannot find it
614          * here anymore.
615          */
616         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
617         tb_deactivate_and_free_tunnel(tunnel);
618 }
619
620 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
621 {
622         if (!xd->is_unplugged) {
623                 mutex_lock(&tb->lock);
624                 __tb_disconnect_xdomain_paths(tb, xd);
625                 mutex_unlock(&tb->lock);
626         }
627         return 0;
628 }
629
630 /* hotplug handling */
631
632 /**
633  * tb_handle_hotplug() - handle hotplug event
634  *
635  * Executes on tb->wq.
636  */
637 static void tb_handle_hotplug(struct work_struct *work)
638 {
639         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
640         struct tb *tb = ev->tb;
641         struct tb_cm *tcm = tb_priv(tb);
642         struct tb_switch *sw;
643         struct tb_port *port;
644         mutex_lock(&tb->lock);
645         if (!tcm->hotplug_active)
646                 goto out; /* during init, suspend or shutdown */
647
648         sw = tb_switch_find_by_route(tb, ev->route);
649         if (!sw) {
650                 tb_warn(tb,
651                         "hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
652                         ev->route, ev->port, ev->unplug);
653                 goto out;
654         }
655         if (ev->port > sw->config.max_port_number) {
656                 tb_warn(tb,
657                         "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
658                         ev->route, ev->port, ev->unplug);
659                 goto put_sw;
660         }
661         port = &sw->ports[ev->port];
662         if (tb_is_upstream_port(port)) {
663                 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
664                        ev->route, ev->port, ev->unplug);
665                 goto put_sw;
666         }
667         if (ev->unplug) {
668                 if (tb_port_has_remote(port)) {
669                         tb_port_dbg(port, "switch unplugged\n");
670                         tb_sw_set_unplugged(port->remote->sw);
671                         tb_free_invalid_tunnels(tb);
672                         tb_remove_dp_resources(port->remote->sw);
673                         tb_switch_lane_bonding_disable(port->remote->sw);
674                         tb_switch_remove(port->remote->sw);
675                         port->remote = NULL;
676                         if (port->dual_link_port)
677                                 port->dual_link_port->remote = NULL;
678                         /* Maybe we can create another DP tunnel */
679                         tb_tunnel_dp(tb);
680                 } else if (port->xdomain) {
681                         struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
682
683                         tb_port_dbg(port, "xdomain unplugged\n");
684                         /*
685                          * Service drivers are unbound during
686                          * tb_xdomain_remove() so setting XDomain as
687                          * unplugged here prevents deadlock if they call
688                          * tb_xdomain_disable_paths(). We will tear down
689                          * the path below.
690                          */
691                         xd->is_unplugged = true;
692                         tb_xdomain_remove(xd);
693                         port->xdomain = NULL;
694                         __tb_disconnect_xdomain_paths(tb, xd);
695                         tb_xdomain_put(xd);
696                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
697                         tb_dp_resource_unavailable(tb, port);
698                 } else {
699                         tb_port_dbg(port,
700                                    "got unplug event for disconnected port, ignoring\n");
701                 }
702         } else if (port->remote) {
703                 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
704         } else {
705                 if (tb_port_is_null(port)) {
706                         tb_port_dbg(port, "hotplug: scanning\n");
707                         tb_scan_port(port);
708                         if (!port->remote)
709                                 tb_port_dbg(port, "hotplug: no switch found\n");
710                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
711                         tb_dp_resource_available(tb, port);
712                 }
713         }
714
715 put_sw:
716         tb_switch_put(sw);
717 out:
718         mutex_unlock(&tb->lock);
719         kfree(ev);
720 }
721
722 /**
723  * tb_handle_event() - callback function for the control channel
724  *
725  * Delegates to tb_handle_hotplug.
726  */
727 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
728                             const void *buf, size_t size)
729 {
730         const struct cfg_event_pkg *pkg = buf;
731         u64 route;
732
733         if (type != TB_CFG_PKG_EVENT) {
734                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
735                 return;
736         }
737
738         route = tb_cfg_get_route(&pkg->header);
739
740         if (tb_cfg_error(tb->ctl, route, pkg->port,
741                          TB_CFG_ERROR_ACK_PLUG_EVENT)) {
742                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
743                         pkg->port);
744         }
745
746         tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
747 }
748
749 static void tb_stop(struct tb *tb)
750 {
751         struct tb_cm *tcm = tb_priv(tb);
752         struct tb_tunnel *tunnel;
753         struct tb_tunnel *n;
754
755         /* tunnels are only present after everything has been initialized */
756         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
757                 /*
758                  * DMA tunnels require the driver to be functional so we
759                  * tear them down. Other protocol tunnels can be left
760                  * intact.
761                  */
762                 if (tb_tunnel_is_dma(tunnel))
763                         tb_tunnel_deactivate(tunnel);
764                 tb_tunnel_free(tunnel);
765         }
766         tb_switch_remove(tb->root_switch);
767         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
768 }
769
770 static int tb_scan_finalize_switch(struct device *dev, void *data)
771 {
772         if (tb_is_switch(dev)) {
773                 struct tb_switch *sw = tb_to_switch(dev);
774
775                 /*
776                  * If we found that the switch was already setup by the
777                  * boot firmware, mark it as authorized now before we
778                  * send uevent to userspace.
779                  */
780                 if (sw->boot)
781                         sw->authorized = 1;
782
783                 dev_set_uevent_suppress(dev, false);
784                 kobject_uevent(&dev->kobj, KOBJ_ADD);
785                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
786         }
787
788         return 0;
789 }
790
791 static int tb_start(struct tb *tb)
792 {
793         struct tb_cm *tcm = tb_priv(tb);
794         int ret;
795
796         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
797         if (IS_ERR(tb->root_switch))
798                 return PTR_ERR(tb->root_switch);
799
800         /*
801          * ICM firmware upgrade needs running ICM firmware; that is not
802          * available in native mode, so disable firmware upgrade of the
803          * root switch.
804          */
805         tb->root_switch->no_nvm_upgrade = true;
806
807         ret = tb_switch_configure(tb->root_switch);
808         if (ret) {
809                 tb_switch_put(tb->root_switch);
810                 return ret;
811         }
812
813         /* Announce the switch to the world */
814         ret = tb_switch_add(tb->root_switch);
815         if (ret) {
816                 tb_switch_put(tb->root_switch);
817                 return ret;
818         }
819
820         /* Full scan to discover devices added before the driver was loaded. */
821         tb_scan_switch(tb->root_switch);
822         /* Find out tunnels created by the boot firmware */
823         tb_discover_tunnels(tb->root_switch);
824         /* Add DP IN resources for the root switch */
825         tb_add_dp_resources(tb->root_switch);
826         /* Make the discovered switches available to the userspace */
827         device_for_each_child(&tb->root_switch->dev, NULL,
828                               tb_scan_finalize_switch);
829
830         /* Allow tb_handle_hotplug to progress events */
831         tcm->hotplug_active = true;
832         return 0;
833 }
834
835 static int tb_suspend_noirq(struct tb *tb)
836 {
837         struct tb_cm *tcm = tb_priv(tb);
838
839         tb_dbg(tb, "suspending...\n");
840         tb_switch_suspend(tb->root_switch);
841         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
842         tb_dbg(tb, "suspend finished\n");
843
844         return 0;
845 }
846
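/*
 * Walk the topology and restore (re-enable) lane bonding for every
 * connected child switch. Used from the resume path below.
 */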
847 static void tb_restore_children(struct tb_switch *sw)
848 {
849         struct tb_port *port;
850
851         tb_switch_for_each_port(sw, port) {
852                 if (!tb_port_has_remote(port))
853                         continue;
854
855                 if (tb_switch_lane_bonding_enable(port->remote->sw))
856                         dev_warn(&sw->dev, "failed to restore lane bonding\n");
857
858                 tb_restore_children(port->remote->sw);
859         }
860 }
861
862 static int tb_resume_noirq(struct tb *tb)
863 {
864         struct tb_cm *tcm = tb_priv(tb);
865         struct tb_tunnel *tunnel, *n;
866
867         tb_dbg(tb, "resuming...\n");
868
869         /* Remove any PCI devices the firmware might have set up */
870         tb_switch_reset(tb, 0);
871
872         tb_switch_resume(tb->root_switch);
873         tb_free_invalid_tunnels(tb);
874         tb_free_unplugged_children(tb->root_switch);
875         tb_restore_children(tb->root_switch);
876         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
877                 tb_tunnel_restart(tunnel);
878         if (!list_empty(&tcm->tunnel_list)) {
879                 /*
880                  * the pcie links need some time to get going.
881                  * 100ms works for me...
882                  */
883                 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
884                 msleep(100);
885         }
886          /* Allow tb_handle_hotplug to progress events */
887         tcm->hotplug_active = true;
888         tb_dbg(tb, "resume finished\n");
889
890         return 0;
891 }
892
893 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
894 {
895         struct tb_port *port;
896         int ret = 0;
897
898         tb_switch_for_each_port(sw, port) {
899                 if (tb_is_upstream_port(port))
900                         continue;
901                 if (port->xdomain && port->xdomain->is_unplugged) {
902                         tb_xdomain_remove(port->xdomain);
903                         port->xdomain = NULL;
904                         ret++;
905                 } else if (port->remote) {
906                         ret += tb_free_unplugged_xdomains(port->remote->sw);
907                 }
908         }
909
910         return ret;
911 }
912
913 static void tb_complete(struct tb *tb)
914 {
915         /*
916          * Release any unplugged XDomains. If another domain has been
917          * swapped in place of an unplugged XDomain we need to run
918          * another rescan.
919          */
920         mutex_lock(&tb->lock);
921         if (tb_free_unplugged_xdomains(tb->root_switch))
922                 tb_scan_switch(tb->root_switch);
923         mutex_unlock(&tb->lock);
924 }
925
926 static const struct tb_cm_ops tb_cm_ops = {
927         .start = tb_start,
928         .stop = tb_stop,
929         .suspend_noirq = tb_suspend_noirq,
930         .resume_noirq = tb_resume_noirq,
931         .complete = tb_complete,
932         .handle_event = tb_handle_event,
933         .approve_switch = tb_tunnel_pci,
934         .approve_xdomain_paths = tb_approve_xdomain_paths,
935         .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
936 };
937
938 struct tb *tb_probe(struct tb_nhi *nhi)
939 {
940         struct tb_cm *tcm;
941         struct tb *tb;
942
943         if (!x86_apple_machine)
944                 return NULL;
945
946         tb = tb_domain_alloc(nhi, sizeof(*tcm));
947         if (!tb)
948                 return NULL;
949
950         tb->security_level = TB_SECURITY_USER;
951         tb->cm_ops = &tb_cm_ops;
952
953         tcm = tb_priv(tb);
954         INIT_LIST_HEAD(&tcm->tunnel_list);
955         INIT_LIST_HEAD(&tcm->dp_resources);
956
957         return tb;
958 }
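For context, a rough sketch of how a connection manager returned by tb_probe() is expected to be used by the NHI driver, assuming the icm_probe(), tb_domain_add() and tb_domain_put() interfaces from the rest of the thunderbolt driver (they are not part of this file). tb_probe() only offers this software connection manager on Apple hardware (x86_apple_machine above), so the firmware connection manager would be tried first:

	/* Sketch of connection manager selection at NHI probe time, under
	 * the assumptions stated above. */
	struct tb *tb;
	int ret;

	tb = icm_probe(nhi);		/* firmware connection manager */
	if (!tb)
		tb = tb_probe(nhi);	/* software CM (this file), Apple only */
	if (!tb)
		return -ENODEV;

	ret = tb_domain_add(tb);	/* starts the CM (tb_start() above) */
	if (ret) {
		tb_domain_put(tb);
		return ret;
	}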