1 // SPDX-License-Identifier: GPL-2.0
3 * Thunderbolt driver - bus logic (NHI independent)
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2019, Intel Corporation
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/platform_data/x86/apple.h>
19 * struct tb_cm - Simple Thunderbolt connection manager
20 * @tunnel_list: List of active tunnels
21 * @hotplug_active: tb_handle_hotplug will stop progressing plug
22 * events and exit if this is not set (it needs to
23 * acquire the lock one more time). Used to drain wq
24 * after cfg has been paused.
27 struct list_head tunnel_list;
31 struct tb_hotplug_event {
32 struct work_struct work;
39 static void tb_handle_hotplug(struct work_struct *work);
41 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
43 struct tb_hotplug_event *ev;
45 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
53 INIT_WORK(&ev->work, tb_handle_hotplug);
54 queue_work(tb->wq, &ev->work);
57 /* enumeration & hot plug handling */
/*
 * tb_discover_tunnels() - find tunnels created by the boot firmware
 * @sw: Switch whose ports are examined
 *
 * Discovers existing DP and PCIe tunnels starting at @sw, records them in
 * the connection manager's tunnel_list, and recurses into every connected
 * downstream switch.
 */
59 static void tb_discover_tunnels(struct tb_switch *sw)
61 struct tb *tb = sw->tb;
62 struct tb_cm *tcm = tb_priv(tb);
65 tb_switch_for_each_port(sw, port) {
66 struct tb_tunnel *tunnel = NULL;
/* Only DP IN and PCIe down adapters can be tunnel sources here */
68 switch (port->config.type) {
69 case TB_TYPE_DP_HDMI_IN:
70 tunnel = tb_tunnel_discover_dp(tb, port);
73 case TB_TYPE_PCIE_DOWN:
74 tunnel = tb_tunnel_discover_pci(tb, port);
/*
 * For a discovered PCIe tunnel walk from the destination switch up
 * towards the tunnel source, marking each switch on the way.
 */
84 if (tb_tunnel_is_pci(tunnel)) {
85 struct tb_switch *parent = tunnel->dst_port->sw;
87 while (parent != tunnel->src_port->sw) {
89 parent = tb_switch_parent(parent);
93 list_add_tail(&tunnel->list, &tcm->tunnel_list);
/* Second pass: recurse into all switches connected below @sw */
96 tb_switch_for_each_port(sw, port) {
97 if (tb_port_has_remote(port))
98 tb_discover_tunnels(port->remote->sw);
/*
 * tb_scan_xdomain() - check for and register a host-to-host (XDomain) link
 * @port: Port that may be connected to another Thunderbolt domain
 *
 * Looks up whether an XDomain already exists for the downstream route of
 * @port; if not, allocates one and attaches it to the port.
 */
102 static void tb_scan_xdomain(struct tb_port *port)
104 struct tb_switch *sw = port->sw;
105 struct tb *tb = sw->tb;
106 struct tb_xdomain *xd;
109 route = tb_downstream_route(port);
110 xd = tb_xdomain_find_by_route(tb, route);
/* Not known yet - create a new XDomain for this route */
116 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
119 tb_port_at(route, sw)->xdomain = xd;
124 static void tb_scan_port(struct tb_port *port);
127 * tb_scan_switch() - scan for and initialize downstream switches
 * @sw: Switch whose ports are scanned (recursion happens via tb_scan_port())
 */
129 static void tb_scan_switch(struct tb_switch *sw)
131 struct tb_port *port;
/* Delegate per-port work (and recursion) to tb_scan_port() */
133 tb_switch_for_each_port(sw, port)
138 * tb_scan_port() - check for and initialize switches below port
 * @port: Port to probe; non-lane ports and secondary links are skipped
 */
140 static void tb_scan_port(struct tb_port *port)
142 struct tb_cm *tcm = tb_priv(port->sw->tb);
143 struct tb_port *upstream_port;
144 struct tb_switch *sw;
146 if (tb_is_upstream_port(port))
/* A DP OUT adapter with HPD asserted but no tunnel: let hotplug handle it */
149 if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
150 !tb_dp_port_is_enabled(port)) {
151 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
152 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
157 if (port->config.type != TB_TYPE_PORT)
159 if (port->dual_link_port && port->link_nr)
161 * Downstream switch is reachable through two ports.
162 * Only scan on the primary port (link_nr == 0).
164 if (tb_wait_for_port(port, false) <= 0)
167 tb_port_dbg(port, "port already has a remote\n")
170 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
171 tb_downstream_route(port));
174 * If there is an error accessing the connected switch
175 * it may be connected to another domain. Also we allow
176 * the other domain to be connected to a max depth switch.
178 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
179 tb_scan_xdomain(port);
183 if (tb_switch_configure(sw)) {
189 * If there was previously another domain connected remove it
 * first before adding the newly found switch.
193 tb_xdomain_remove(port->xdomain);
194 port->xdomain = NULL;
198 * Do not send uevents until we have discovered all existing
199 * tunnels and know which switches were authorized already by
 * the boot firmware.
202 if (!tcm->hotplug_active)
203 dev_set_uevent_suppress(&sw->dev, true);
205 if (tb_switch_add(sw)) {
210 /* Link the switches using both links if available */
211 upstream_port = tb_upstream_port(sw);
212 port->remote = upstream_port;
213 upstream_port->remote = port;
214 if (port->dual_link_port && upstream_port->dual_link_port) {
215 port->dual_link_port->remote = upstream_port->dual_link_port;
216 upstream_port->dual_link_port->remote = port->dual_link_port;
219 /* Enable lane bonding if supported */
220 if (tb_switch_lane_bonding_enable(sw))
221 tb_sw_warn(sw, "failed to enable lane bonding\n");
/*
 * tb_free_tunnel() - deactivate and free the first matching tunnel
 * @tb: Domain whose tunnel list is searched
 * @type: Tunnel type (DP, PCIe, DMA) to match
 * @src_port: If non-NULL, match on the tunnel's source port
 * @dst_port: If non-NULL, match on the tunnel's destination port
 *
 * A tunnel matches when its type equals @type and either provided port
 * equals the corresponding tunnel end. The match is deactivated, unlinked
 * and freed.
 */
226 static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
227 struct tb_port *src_port, struct tb_port *dst_port)
229 struct tb_cm *tcm = tb_priv(tb);
230 struct tb_tunnel *tunnel;
232 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
233 if (tunnel->type == type &&
234 ((src_port && src_port == tunnel->src_port) ||
235 (dst_port && dst_port == tunnel->dst_port))) {
236 tb_tunnel_deactivate(tunnel);
237 list_del(&tunnel->list);
238 tb_tunnel_free(tunnel);
247 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 * @tb: Domain whose tunnel list is pruned
 */
249 static void tb_free_invalid_tunnels(struct tb *tb)
251 struct tb_cm *tcm = tb_priv(tb);
252 struct tb_tunnel *tunnel;
/* _safe iteration: entries are deleted while walking the list */
255 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
256 if (tb_tunnel_is_invalid(tunnel)) {
257 tb_tunnel_deactivate(tunnel);
258 list_del(&tunnel->list);
259 tb_tunnel_free(tunnel);
265 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 * @sw: Root of the sub-tree to prune
 */
267 static void tb_free_unplugged_children(struct tb_switch *sw)
269 struct tb_port *port;
271 tb_switch_for_each_port(sw, port) {
272 if (!tb_port_has_remote(port))
/* Remove an unplugged child; otherwise recurse into it */
275 if (port->remote->sw->is_unplugged) {
276 tb_switch_lane_bonding_disable(port->remote->sw);
277 tb_switch_remove(port->remote->sw);
/* Clear the secondary-link back pointer as well */
279 if (port->dual_link_port)
280 port->dual_link_port->remote = NULL;
282 tb_free_unplugged_children(port->remote->sw);
288 * tb_find_port() - return the first port of @type on @sw or NULL
289 * @sw: Switch to find the port from
290 * @type: Port type to look for
 *
 * Returns the first matching port, or NULL if @sw has none of @type.
 */
292 static struct tb_port *tb_find_port(struct tb_switch *sw,
293 enum tb_port_type type)
295 struct tb_port *port;
297 tb_switch_for_each_port(sw, port) {
298 if (port->config.type == type)
306 * tb_find_unused_port() - return the first inactive port on @sw
307 * @sw: Switch to find the port on
308 * @type: Port type to look for
 *
 * Like tb_find_port() but additionally skips the upstream port and any
 * port that is already enabled.
 */
310 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
311 enum tb_port_type type)
313 struct tb_port *port;
315 tb_switch_for_each_port(sw, port) {
316 if (tb_is_upstream_port(port))
318 if (port->config.type != type)
322 if (tb_port_is_enabled(port))
/*
 * tb_find_pcie_down() - pick the PCIe down adapter matching @port
 * @sw: Switch holding the candidate PCIe down adapters
 * @port: Physical lane port used to derive the PCIe adapter index
 *
 * On known Intel controllers the Thunderbolt-port to PCIe-down-port
 * mapping is hard-coded; otherwise falls back to any unused PCIe down
 * adapter on @sw.
 */
329 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
330 const struct tb_port *port)
333 * To keep plugging devices consistently in the same PCIe
334 * hierarchy, do mapping here for root switch downstream PCIe
 * ports.
338 int phy_port = tb_phy_port_from_link(port->port);
342 * Hard-coded Thunderbolt port to PCIe down port mapping
 * per controller family.
345 if (tb_switch_is_cactus_ridge(sw) ||
346 tb_switch_is_alpine_ridge(sw))
347 index = !phy_port ? 6 : 7;
348 else if (tb_switch_is_falcon_ridge(sw))
349 index = !phy_port ? 6 : 8;
350 else if (tb_switch_is_titan_ridge(sw))
351 index = !phy_port ? 8 : 9;
355 /* Validate the hard-coding */
356 if (WARN_ON(index > sw->config.max_port_number))
358 if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
360 if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
363 return &sw->ports[index];
/* Unknown hardware: take any unused PCIe down adapter */
367 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
/*
 * tb_tunnel_dp() - establish a DisplayPort tunnel ending at @out
 * @tb: Domain the tunnel is created in
 * @out: DP OUT adapter on the newly plugged device
 *
 * Walks up from @out towards the root looking for an unused DP IN
 * adapter, then allocates and activates a DP tunnel between the two and
 * adds it to the tunnel list.
 */
370 static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
372 struct tb_cm *tcm = tb_priv(tb);
373 struct tb_switch *sw = out->sw;
374 struct tb_tunnel *tunnel;
/* Nothing to do if the DP OUT adapter is already tunneled */
377 if (tb_port_is_enabled(out))
/* Walk towards the root switch in search of a DP IN adapter */
381 sw = tb_to_switch(sw->dev.parent);
384 in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
387 tunnel = tb_tunnel_alloc_dp(tb, in, out);
389 tb_port_dbg(out, "DP tunnel allocation failed\n");
393 if (tb_tunnel_activate(tunnel)) {
394 tb_port_info(out, "DP tunnel activation failed, aborting\n");
395 tb_tunnel_free(tunnel);
399 list_add_tail(&tunnel->list, &tcm->tunnel_list);
/* Tear down the DP tunnel (if any) whose destination is @out */
403 static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
405 tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
/*
 * tb_tunnel_pci() - tunnel PCIe from the parent switch down to @sw
 * @tb: Domain the tunnel is created in
 * @sw: Newly approved switch that should get PCIe connectivity
 *
 * Pairs the PCIe up adapter on @sw with a PCIe down adapter on its parent
 * (chained topology), then allocates and activates the tunnel. Also used
 * as the ->approve_switch callback in tb_cm_ops.
 */
408 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
410 struct tb_port *up, *down, *port;
411 struct tb_cm *tcm = tb_priv(tb);
412 struct tb_switch *parent_sw;
413 struct tb_tunnel *tunnel;
415 up = tb_find_port(sw, TB_TYPE_PCIE_UP);
420 * Look up available down port. Since we are chaining it should
421 * be found right above this switch.
423 parent_sw = tb_to_switch(sw->dev.parent);
424 port = tb_port_at(tb_route(sw), parent_sw);
425 down = tb_find_pcie_down(parent_sw, port);
429 tunnel = tb_tunnel_alloc_pci(tb, up, down);
433 if (tb_tunnel_activate(tunnel)) {
435 "PCIe tunnel activation failed, aborting\n");
436 tb_tunnel_free(tunnel);
440 list_add_tail(&tunnel->list, &tcm->tunnel_list);
/*
 * tb_approve_xdomain_paths() - set up DMA paths to another host
 * @tb: Domain the DMA tunnel is created in
 * @xd: XDomain describing the remote host and its ring/path parameters
 *
 * Creates a DMA tunnel between the local NHI adapter and the port facing
 * the other domain, activates it and records it in the tunnel list.
 * All list/tunnel manipulation happens under tb->lock.
 */
444 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
446 struct tb_cm *tcm = tb_priv(tb);
447 struct tb_port *nhi_port, *dst_port;
448 struct tb_tunnel *tunnel;
449 struct tb_switch *sw;
451 sw = tb_to_switch(xd->dev.parent);
452 dst_port = tb_port_at(xd->route, sw);
453 nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
455 mutex_lock(&tb->lock);
456 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
457 xd->transmit_path, xd->receive_ring,
460 mutex_unlock(&tb->lock);
464 if (tb_tunnel_activate(tunnel)) {
465 tb_port_info(nhi_port,
466 "DMA tunnel activation failed, aborting\n");
467 tb_tunnel_free(tunnel);
468 mutex_unlock(&tb->lock);
472 list_add_tail(&tunnel->list, &tcm->tunnel_list);
473 mutex_unlock(&tb->lock);
/*
 * __tb_disconnect_xdomain_paths() - tear down DMA tunnel to @xd
 *
 * Caller is expected to hold tb->lock (see tb_disconnect_xdomain_paths()).
 */
477 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
479 struct tb_port *dst_port;
480 struct tb_switch *sw;
482 sw = tb_to_switch(xd->dev.parent);
483 dst_port = tb_port_at(xd->route, sw);
486 * It is possible that the tunnel was already torn down (in
487 * case of cable disconnect) so it is fine if we cannot find it
 * here anymore.
490 tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
/*
 * tb_disconnect_xdomain_paths() - locked wrapper for tearing down DMA paths
 *
 * Skips the teardown when @xd is already unplugged: in that case the
 * hotplug handler performs it (already holding tb->lock).
 */
493 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
495 if (!xd->is_unplugged) {
496 mutex_lock(&tb->lock);
497 __tb_disconnect_xdomain_paths(tb, xd);
498 mutex_unlock(&tb->lock);
503 /* hotplug handling */
506 * tb_handle_hotplug() - handle hotplug event
 * @work: Embedded in a struct tb_hotplug_event queued by tb_queue_hotplug()
 *
508 * Executes on tb->wq.
 */
510 static void tb_handle_hotplug(struct work_struct *work)
512 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
513 struct tb *tb = ev->tb;
514 struct tb_cm *tcm = tb_priv(tb);
515 struct tb_switch *sw;
516 struct tb_port *port;
517 mutex_lock(&tb->lock);
518 if (!tcm->hotplug_active)
519 goto out; /* during init, suspend or shutdown */
/* Validate that the event references an existing switch and port */
521 sw = tb_switch_find_by_route(tb, ev->route);
524 "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
525 ev->route, ev->port, ev->unplug);
528 if (ev->port > sw->config.max_port_number) {
530 "hotplug event from non existent port %llx:%x (unplug: %d)\n",
531 ev->route, ev->port, ev->unplug);
534 port = &sw->ports[ev->port];
535 if (tb_is_upstream_port(port)) {
536 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
537 ev->route, ev->port, ev->unplug);
/* Unplug: remove whatever was behind the port */
541 if (tb_port_has_remote(port)) {
542 tb_port_dbg(port, "switch unplugged\n");
543 tb_sw_set_unplugged(port->remote->sw);
544 tb_free_invalid_tunnels(tb);
545 tb_switch_lane_bonding_disable(port->remote->sw);
546 tb_switch_remove(port->remote->sw);
548 if (port->dual_link_port)
549 port->dual_link_port->remote = NULL;
550 } else if (port->xdomain) {
551 struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
553 tb_port_dbg(port, "xdomain unplugged\n");
555 * Service drivers are unbound during
556 * tb_xdomain_remove() so setting XDomain as
557 * unplugged here prevents deadlock if they call
558 * tb_xdomain_disable_paths(). We will tear down
 * the DMA paths below.
561 xd->is_unplugged = true;
562 tb_xdomain_remove(xd);
563 port->xdomain = NULL;
564 __tb_disconnect_xdomain_paths(tb, xd);
566 } else if (tb_port_is_dpout(port)) {
567 tb_teardown_dp(tb, port);
570 "got unplug event for disconnected port, ignoring\n");
/* Plug: scan null ports, tunnel DP OUT adapters */
572 } else if (port->remote) {
573 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
575 if (tb_port_is_null(port)) {
576 tb_port_dbg(port, "hotplug: scanning\n");
579 tb_port_dbg(port, "hotplug: no switch found\n");
580 } else if (tb_port_is_dpout(port)) {
581 tb_tunnel_dp(tb, port);
588 mutex_unlock(&tb->lock);
593 * tb_handle_event() - callback function for the control channel
 *
595 * Delegates to tb_handle_hotplug.
 */
597 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
598 const void *buf, size_t size)
600 const struct cfg_event_pkg *pkg = buf;
/* Only plug-event packages are expected here */
603 if (type != TB_CFG_PKG_EVENT) {
604 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
608 route = tb_cfg_get_route(&pkg->header);
/* Ack the event before queueing the work */
610 if (tb_cfg_error(tb->ctl, route, pkg->port,
611 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
612 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
616 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
/*
 * tb_stop() - ->stop callback: tear down the domain
 *
 * Frees all tunnels (deactivating only DMA ones, which need a working
 * driver), removes the switch tree and stops hotplug processing.
 */
619 static void tb_stop(struct tb *tb)
621 struct tb_cm *tcm = tb_priv(tb);
622 struct tb_tunnel *tunnel;
625 /* tunnels are only present after everything has been initialized */
626 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
628 * DMA tunnels require the driver to be functional so we
629 * tear them down. Other protocol tunnels can be left
 * intact.
632 if (tb_tunnel_is_dma(tunnel))
633 tb_tunnel_deactivate(tunnel);
634 tb_tunnel_free(tunnel);
636 tb_switch_remove(tb->root_switch);
637 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
/*
 * tb_scan_finalize_switch() - device_for_each_child() callback
 *
 * Re-enables uevents for a discovered switch, announces it via KOBJ_ADD
 * and recurses into its children.
 */
640 static int tb_scan_finalize_switch(struct device *dev, void *data)
642 if (tb_is_switch(dev)) {
643 struct tb_switch *sw = tb_to_switch(dev);
646 * If we found that the switch was already setup by the
647 * boot firmware, mark it as authorized now before we
648 * send uevent to userspace.
653 dev_set_uevent_suppress(dev, false);
654 kobject_uevent(&dev->kobj, KOBJ_ADD);
655 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
/*
 * tb_start() - ->start callback: bring up the domain
 *
 * Allocates, configures and registers the root switch, performs a full
 * topology scan, discovers firmware-created tunnels, announces the found
 * devices to userspace and finally enables hotplug event processing.
 * Returns 0 on success or a negative errno.
 */
661 static int tb_start(struct tb *tb)
663 struct tb_cm *tcm = tb_priv(tb);
666 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
667 if (IS_ERR(tb->root_switch))
668 return PTR_ERR(tb->root_switch);
671 * ICM firmware upgrade needs running firmware and in native
672 * mode that is not available so disable firmware upgrade of the
 * root switch.
675 tb->root_switch->no_nvm_upgrade = true;
677 ret = tb_switch_configure(tb->root_switch);
679 tb_switch_put(tb->root_switch);
683 /* Announce the switch to the world */
684 ret = tb_switch_add(tb->root_switch);
686 tb_switch_put(tb->root_switch);
690 /* Full scan to discover devices added before the driver was loaded. */
691 tb_scan_switch(tb->root_switch);
692 /* Find out tunnels created by the boot firmware */
693 tb_discover_tunnels(tb->root_switch);
694 /* Make the discovered switches available to the userspace */
695 device_for_each_child(&tb->root_switch->dev, NULL,
696 tb_scan_finalize_switch);
698 /* Allow tb_handle_hotplug to progress events */
699 tcm->hotplug_active = true;
/*
 * tb_suspend_noirq() - ->suspend_noirq callback
 *
 * Suspends the whole switch tree and stops hotplug processing until
 * tb_resume_noirq() re-enables it.
 */
703 static int tb_suspend_noirq(struct tb *tb)
705 struct tb_cm *tcm = tb_priv(tb);
707 tb_dbg(tb, "suspending...\n");
708 tb_switch_suspend(tb->root_switch);
709 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
710 tb_dbg(tb, "suspend finished\n");
/*
 * tb_restore_children() - re-enable lane bonding after resume
 * @sw: Root of the sub-tree to restore (recurses into children)
 */
715 static void tb_restore_children(struct tb_switch *sw)
717 struct tb_port *port;
719 tb_switch_for_each_port(sw, port) {
720 if (!tb_port_has_remote(port))
723 if (tb_switch_lane_bonding_enable(port->remote->sw))
724 dev_warn(&sw->dev, "failed to restore lane bonding\n");
726 tb_restore_children(port->remote->sw);
/*
 * tb_resume_noirq() - ->resume_noirq callback
 *
 * Resets firmware-created state, resumes the switch tree, prunes
 * invalid tunnels and unplugged children, restores lane bonding,
 * restarts the remaining tunnels and re-enables hotplug processing.
 */
730 static int tb_resume_noirq(struct tb *tb)
732 struct tb_cm *tcm = tb_priv(tb);
733 struct tb_tunnel *tunnel, *n;
735 tb_dbg(tb, "resuming...\n");
737 /* remove any pci devices the firmware might have setup */
738 tb_switch_reset(tb, 0);
740 tb_switch_resume(tb->root_switch);
741 tb_free_invalid_tunnels(tb);
742 tb_free_unplugged_children(tb->root_switch);
743 tb_restore_children(tb->root_switch);
744 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
745 tb_tunnel_restart(tunnel);
746 if (!list_empty(&tcm->tunnel_list)) {
748 * the pcie links need some time to get going.
749 * 100ms works for me...
751 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
754 /* Allow tb_handle_hotplug to progress events */
755 tcm->hotplug_active = true;
756 tb_dbg(tb, "resume finished\n");
/*
 * tb_free_unplugged_xdomains() - remove unplugged XDomains below @sw
 *
 * Returns the number of XDomains removed (accumulated through the
 * recursion) so the caller knows whether a rescan is needed.
 */
761 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
763 struct tb_port *port;
766 tb_switch_for_each_port(sw, port) {
767 if (tb_is_upstream_port(port))
769 if (port->xdomain && port->xdomain->is_unplugged) {
770 tb_xdomain_remove(port->xdomain);
771 port->xdomain = NULL;
773 } else if (port->remote) {
774 ret += tb_free_unplugged_xdomains(port->remote->sw);
/*
 * tb_complete() - ->complete callback run at the end of system resume
 */
781 static void tb_complete(struct tb *tb)
784 * Release any unplugged XDomains and if there is a case where
785 * another domain is swapped in place of unplugged XDomain we
786 * need to run another rescan.
788 mutex_lock(&tb->lock);
789 if (tb_free_unplugged_xdomains(tb->root_switch))
790 tb_scan_switch(tb->root_switch);
791 mutex_unlock(&tb->lock);
/* Connection manager callbacks for the software (native) CM */
794 static const struct tb_cm_ops tb_cm_ops = {
797 .suspend_noirq = tb_suspend_noirq,
798 .resume_noirq = tb_resume_noirq,
799 .complete = tb_complete,
800 .handle_event = tb_handle_event,
801 .approve_switch = tb_tunnel_pci,
802 .approve_xdomain_paths = tb_approve_xdomain_paths,
803 .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
806 struct tb *tb_probe(struct tb_nhi *nhi)
811 if (!x86_apple_machine)
814 tb = tb_domain_alloc(nhi, sizeof(*tcm));
818 tb->security_level = TB_SECURITY_USER;
819 tb->cm_ops = &tb_cm_ops;
822 INIT_LIST_HEAD(&tcm->tunnel_list);