2 * Internal Thunderbolt Connection Manager. This is a firmware running on
3 * the Thunderbolt host controller performing most of the low-level
4 * handling.
6 * Copyright (C) 2017, Intel Corporation
7 * Authors: Michael Jamet <michael.jamet@intel.com>
8 * Mika Westerberg <mika.westerberg@linux.intel.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
15 #include <linux/delay.h>
16 #include <linux/mutex.h>
17 #include <linux/pci.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/platform_data/x86/apple.h>
20 #include <linux/sizes.h>
21 #include <linux/slab.h>
22 #include <linux/workqueue.h>
28 #define PCIE2CIO_CMD 0x30
29 #define PCIE2CIO_CMD_TIMEOUT BIT(31)
30 #define PCIE2CIO_CMD_START BIT(30)
31 #define PCIE2CIO_CMD_WRITE BIT(21)
32 #define PCIE2CIO_CMD_CS_MASK GENMASK(20, 19)
33 #define PCIE2CIO_CMD_CS_SHIFT 19
34 #define PCIE2CIO_CMD_PORT_MASK GENMASK(18, 13)
35 #define PCIE2CIO_CMD_PORT_SHIFT 13
37 #define PCIE2CIO_WRDATA 0x34
38 #define PCIE2CIO_RDDATA 0x38
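/*
 * PCIe2CIO is a mailbox in the vendor-defined capability of the upstream
 * bridge that provides access to the CIO config spaces even before the
 * ICM firmware is running. A transfer is started by setting
 * PCIE2CIO_CMD_START in PCIE2CIO_CMD and has completed once the hardware
 * clears that bit again.
 */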
40 #define PHY_PORT_CS1 0x37
41 #define PHY_PORT_CS1_LINK_DISABLE BIT(14)
42 #define PHY_PORT_CS1_LINK_STATE_MASK GENMASK(29, 26)
43 #define PHY_PORT_CS1_LINK_STATE_SHIFT 26
45 #define ICM_TIMEOUT 5000 /* ms */
46 #define ICM_APPROVE_TIMEOUT 10000 /* ms */
47 #define ICM_MAX_LINK 4
48 #define ICM_MAX_DEPTH 6
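/*
 * Falcon Ridge firmware addresses devices by (link, depth) pairs; these
 * are the limits used to sanity check link and depth in incoming events.
 */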
51 * struct icm - Internal connection manager private data
52 * @request_lock: Makes sure only one message is sent to the ICM at a time
53 * @rescan_work: Work used to rescan the surviving switches after resume
54 * @upstream_port: Pointer to the PCIe upstream port this host
55 * controller is connected to. This is only set for systems
56 * where ICM needs to be started manually
57 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
58 * (only set when @upstream_port is not %NULL)
59 * @safe_mode: ICM is in safe mode
60 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
61 * @rpm: Does the controller support runtime PM (RTD3)
62 * @is_supported: Checks if we can support ICM on this controller
63 * @get_mode: Read and return the ICM firmware mode (optional)
64 * @get_route: Find a route string for the given switch
65 * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
66 * @driver_ready: Send driver ready message to ICM
67 * @device_connected: Handle device connected ICM message
68 * @device_disconnected: Handle device disconnected ICM message
69 * @xdomain_connected: Handle XDomain connected ICM message
70 * @xdomain_disconnected: Handle XDomain disconnected ICM message
73 struct mutex request_lock;
74 struct delayed_work rescan_work;
75 struct pci_dev *upstream_port;
80 bool (*is_supported)(struct tb *tb);
81 int (*get_mode)(struct tb *tb);
82 int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
83 void (*save_devices)(struct tb *tb);
84 int (*driver_ready)(struct tb *tb,
85 enum tb_security_level *security_level,
86 size_t *nboot_acl, bool *rpm);
87 void (*device_connected)(struct tb *tb,
88 const struct icm_pkg_header *hdr);
89 void (*device_disconnected)(struct tb *tb,
90 const struct icm_pkg_header *hdr);
91 void (*xdomain_connected)(struct tb *tb,
92 const struct icm_pkg_header *hdr);
93 void (*xdomain_disconnected)(struct tb *tb,
94 const struct icm_pkg_header *hdr);
97 struct icm_notification {
98 struct work_struct work;
99 struct icm_pkg_header *pkg;
103 struct ep_name_entry {
109 #define EP_NAME_INTEL_VSS 0x10
111 /* Intel vendor-specific structure */
121 #define INTEL_VSS_FLAGS_RTD3 BIT(0)
123 static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
125 const void *end = ep_name + size;
127 while (ep_name < end) {
128 const struct ep_name_entry *ep = ep_name;
132 if (ep_name + ep->len > end)
135 if (ep->type == EP_NAME_INTEL_VSS)
136 return (const struct intel_vss *)ep->data;
144 static inline struct tb *icm_to_tb(struct icm *icm)
146 return ((void *)icm - sizeof(struct tb));
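/*
 * A route string holds one byte per hop: the port number taken at each
 * depth level on the way from the host router. The helpers below pick
 * those bytes apart.
 */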
149 static inline u8 phy_port_from_route(u64 route, u8 depth)
153 link = depth ? route >> ((depth - 1) * 8) : route;
154 return tb_phy_port_from_link(link);
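/*
 * A physical port consists of two dual-link ports. This returns the
 * sibling of the given link: 1 <-> 2, 3 <-> 4 and so on (0 means the
 * link is not set).
 */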
157 static inline u8 dual_link_from_link(u8 link)
159 return link ? ((link - 1) ^ 0x01) + 1 : 0;
162 static inline u64 get_route(u32 route_hi, u32 route_lo)
164 return (u64)route_hi << 32 | route_lo;
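/*
 * Masking off the byte of the deepest hop yields the route string of the
 * parent switch (0 when the switch sits right below the host router).
 */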
167 static inline u64 get_parent_route(u64 route)
169 int depth = tb_route_length(route);
170 return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
173 static bool icm_match(const struct tb_cfg_request *req,
174 const struct ctl_pkg *pkg)
176 const struct icm_pkg_header *res_hdr = pkg->buffer;
177 const struct icm_pkg_header *req_hdr = req->request;
179 if (pkg->frame.eof != req->response_type)
181 if (res_hdr->code != req_hdr->code)
187 static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
189 const struct icm_pkg_header *hdr = pkg->buffer;
191 if (hdr->packet_id < req->npackets) {
192 size_t offset = hdr->packet_id * req->response_size;
194 memcpy(req->response + offset, pkg->buffer, req->response_size);
197 return hdr->packet_id == hdr->total_packets - 1;
200 static int icm_request(struct tb *tb, const void *request, size_t request_size,
201 void *response, size_t response_size, size_t npackets,
202 unsigned int timeout_msec)
204 struct icm *icm = tb_priv(tb);
208 struct tb_cfg_request *req;
209 struct tb_cfg_result res;
211 req = tb_cfg_request_alloc();
215 req->match = icm_match;
216 req->copy = icm_copy;
217 req->request = request;
218 req->request_size = request_size;
219 req->request_type = TB_CFG_PKG_ICM_CMD;
220 req->response = response;
221 req->npackets = npackets;
222 req->response_size = response_size;
223 req->response_type = TB_CFG_PKG_ICM_RESP;
225 mutex_lock(&icm->request_lock);
226 res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
227 mutex_unlock(&icm->request_lock);
229 tb_cfg_request_put(req);
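/*
 * Only a timeout is worth retrying; any other result is final, and a
 * result code of 1 reports an error from the firmware, which is
 * mapped to -EIO below.
 */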
231 if (res.err != -ETIMEDOUT)
232 return res.err == 1 ? -EIO : res.err;
234 usleep_range(20, 50);
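/*
 * On Apple hardware the topology is handled by the software connection
 * manager instead, so the Falcon Ridge ICM is only used on non-Apple
 * machines.
 */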
240 static bool icm_fr_is_supported(struct tb *tb)
242 return !x86_apple_machine;
245 static inline int icm_fr_get_switch_index(u32 port)
249 if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
252 index = port >> ICM_PORT_INDEX_SHIFT;
253 return index != 0xff ? index : 0;
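/*
 * The FR firmware has no direct route query; the full topology is
 * fetched instead and walked link by link below to construct the route
 * string for the switch at the given (link, depth) position.
 */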
256 static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
258 struct icm_fr_pkg_get_topology_response *switches, *sw;
259 struct icm_fr_pkg_get_topology request = {
260 .hdr = { .code = ICM_GET_TOPOLOGY },
262 size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
266 switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
270 ret = icm_request(tb, &request, sizeof(request), switches,
271 sizeof(*switches), npackets, ICM_TIMEOUT);
276 index = icm_fr_get_switch_index(sw->ports[link]);
282 sw = &switches[index];
283 for (i = 1; i < depth; i++) {
286 if (!(sw->first_data & ICM_SWITCH_USED)) {
291 for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
292 index = icm_fr_get_switch_index(sw->ports[j]);
293 if (index > sw->switch_index) {
294 sw = &switches[index];
300 *route = get_route(sw->route_hi, sw->route_lo);
307 static void icm_fr_save_devices(struct tb *tb)
309 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
313 icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
314 size_t *nboot_acl, bool *rpm)
316 struct icm_fr_pkg_driver_ready_response reply;
317 struct icm_pkg_driver_ready request = {
318 .hdr.code = ICM_DRIVER_READY,
322 memset(&reply, 0, sizeof(reply));
323 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
329 *security_level = reply.security_level & ICM_FR_SLEVEL_MASK;
334 static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
336 struct icm_fr_pkg_approve_device request;
337 struct icm_fr_pkg_approve_device reply;
340 memset(&request, 0, sizeof(request));
341 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
342 request.hdr.code = ICM_APPROVE_DEVICE;
343 request.connection_id = sw->connection_id;
344 request.connection_key = sw->connection_key;
346 memset(&reply, 0, sizeof(reply));
347 /* Use larger timeout as establishing tunnels can take some time */
348 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
349 1, ICM_APPROVE_TIMEOUT);
353 if (reply.hdr.flags & ICM_FLAGS_ERROR) {
354 tb_warn(tb, "PCIe tunnel creation failed\n");
361 static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
363 struct icm_fr_pkg_add_device_key request;
364 struct icm_fr_pkg_add_device_key_response reply;
367 memset(&request, 0, sizeof(request));
368 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
369 request.hdr.code = ICM_ADD_DEVICE_KEY;
370 request.connection_id = sw->connection_id;
371 request.connection_key = sw->connection_key;
372 memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
374 memset(&reply, 0, sizeof(reply));
375 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
380 if (reply.hdr.flags & ICM_FLAGS_ERROR) {
381 tb_warn(tb, "Adding key to switch failed\n");
388 static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
389 const u8 *challenge, u8 *response)
391 struct icm_fr_pkg_challenge_device request;
392 struct icm_fr_pkg_challenge_device_response reply;
395 memset(&request, 0, sizeof(request));
396 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
397 request.hdr.code = ICM_CHALLENGE_DEVICE;
398 request.connection_id = sw->connection_id;
399 request.connection_key = sw->connection_key;
400 memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
402 memset(&reply, 0, sizeof(reply));
403 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
408 if (reply.hdr.flags & ICM_FLAGS_ERROR)
409 return -EKEYREJECTED;
410 if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
413 memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
418 static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
420 struct icm_fr_pkg_approve_xdomain_response reply;
421 struct icm_fr_pkg_approve_xdomain request;
424 memset(&request, 0, sizeof(request));
425 request.hdr.code = ICM_APPROVE_XDOMAIN;
426 request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
427 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
429 request.transmit_path = xd->transmit_path;
430 request.transmit_ring = xd->transmit_ring;
431 request.receive_path = xd->receive_path;
432 request.receive_ring = xd->receive_ring;
434 memset(&reply, 0, sizeof(reply));
435 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
440 if (reply.hdr.flags & ICM_FLAGS_ERROR)
446 static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
451 phy_port = tb_phy_port_from_link(xd->link);
453 cmd = NHI_MAILBOX_DISCONNECT_PA;
455 cmd = NHI_MAILBOX_DISCONNECT_PB;
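	/* The paths are torn down in two phases with a short delay in between */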
457 nhi_mailbox_cmd(tb->nhi, cmd, 1);
458 usleep_range(10, 50);
459 nhi_mailbox_cmd(tb->nhi, cmd, 2);
463 static void add_switch(struct tb_switch *parent_sw, u64 route,
464 const uuid_t *uuid, const u8 *ep_name,
465 size_t ep_name_size, u8 connection_id, u8 connection_key,
466 u8 link, u8 depth, enum tb_security_level security_level,
467 bool authorized, bool boot)
469 const struct intel_vss *vss;
470 struct tb_switch *sw;
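	/* Keep the parent runtime resumed while the new switch is being added */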
472 pm_runtime_get_sync(&parent_sw->dev);
474 sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
478 sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
479 sw->connection_id = connection_id;
480 sw->connection_key = connection_key;
483 sw->authorized = authorized;
484 sw->security_level = security_level;
487 vss = parse_intel_vss(ep_name, ep_name_size);
489 sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
491 /* Link the two switches now */
492 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
493 tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
495 if (tb_switch_add(sw)) {
496 tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
501 pm_runtime_mark_last_busy(&parent_sw->dev);
502 pm_runtime_put_autosuspend(&parent_sw->dev);
505 static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
506 u64 route, u8 connection_id, u8 connection_key,
507 u8 link, u8 depth, bool boot)
509 /* Disconnect from parent */
510 tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
511 /* Re-connect via updated port */
512 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
514 /* Update with the new addressing information */
515 sw->config.route_hi = upper_32_bits(route);
516 sw->config.route_lo = lower_32_bits(route);
517 sw->connection_id = connection_id;
518 sw->connection_key = connection_key;
523 /* This switch still exists */
524 sw->is_unplugged = false;
527 static void remove_switch(struct tb_switch *sw)
529 struct tb_switch *parent_sw;
531 parent_sw = tb_to_switch(sw->dev.parent);
532 tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
533 tb_switch_remove(sw);
536 static void add_xdomain(struct tb_switch *sw, u64 route,
537 const uuid_t *local_uuid, const uuid_t *remote_uuid,
540 struct tb_xdomain *xd;
542 pm_runtime_get_sync(&sw->dev);
544 xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
551 tb_port_at(route, sw)->xdomain = xd;
556 pm_runtime_mark_last_busy(&sw->dev);
557 pm_runtime_put_autosuspend(&sw->dev);
560 static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
564 xd->is_unplugged = false;
567 static void remove_xdomain(struct tb_xdomain *xd)
569 struct tb_switch *sw;
571 sw = tb_to_switch(xd->dev.parent);
572 tb_port_at(xd->route, sw)->xdomain = NULL;
573 tb_xdomain_remove(xd);
577 icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
579 const struct icm_fr_event_device_connected *pkg =
580 (const struct icm_fr_event_device_connected *)hdr;
581 enum tb_security_level security_level;
582 struct tb_switch *sw, *parent_sw;
583 struct icm *icm = tb_priv(tb);
584 bool authorized = false;
585 struct tb_xdomain *xd;
591 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
592 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
593 ICM_LINK_INFO_DEPTH_SHIFT;
594 authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
595 security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
596 ICM_FLAGS_SLEVEL_SHIFT;
597 boot = pkg->link_info & ICM_LINK_INFO_BOOT;
599 if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
600 tb_info(tb, "switch at %u.%u was rejected by ICM firmware because the topology limit was exceeded\n",
605 sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
607 u8 phy_port, sw_phy_port;
609 parent_sw = tb_to_switch(sw->dev.parent);
610 sw_phy_port = tb_phy_port_from_link(sw->link);
611 phy_port = tb_phy_port_from_link(link);
614 * On resume ICM will send us connected events for the
615 * devices that are still present. However, that
616 * information might have changed, for example a switch
617 * on a dual-link connection might have been enumerated
618 * using the other link this time. Make sure our
619 * bookkeeping matches that.
621 if (sw->depth == depth && sw_phy_port == phy_port &&
622 !!sw->authorized == authorized) {
624 * It was enumerated through another link so update the
625 * route string accordingly.
627 if (sw->link != link) {
628 ret = icm->get_route(tb, link, depth, &route);
630 tb_err(tb, "failed to update route string for switch at %u.%u\n",
636 route = tb_route(sw);
639 update_switch(parent_sw, sw, route, pkg->connection_id,
640 pkg->connection_key, link, depth, boot);
646 * The user connected the same switch to another physical
647 * port or to another part of the topology. Remove the
648 * existing switch now before adding the new one.
655 * If the switch was not found by UUID, look for a switch on the
656 * same physical port (taking possible link aggregation into
657 * account) and depth. If we find one it is definitely a stale
658 * entry, so remove it first.
660 sw = tb_switch_find_by_link_depth(tb, link, depth);
664 dual_link = dual_link_from_link(link);
666 sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
673 /* Remove existing XDomain connection if found */
674 xd = tb_xdomain_find_by_link_depth(tb, link, depth);
680 parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
682 tb_err(tb, "failed to find parent switch for %u.%u\n",
687 ret = icm->get_route(tb, link, depth, &route);
689 tb_err(tb, "failed to find route string for switch at %u.%u\n",
691 tb_switch_put(parent_sw);
695 add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
696 sizeof(pkg->ep_name), pkg->connection_id,
697 pkg->connection_key, link, depth, security_level,
700 tb_switch_put(parent_sw);
704 icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
706 const struct icm_fr_event_device_disconnected *pkg =
707 (const struct icm_fr_event_device_disconnected *)hdr;
708 struct tb_switch *sw;
711 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
712 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
713 ICM_LINK_INFO_DEPTH_SHIFT;
715 if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
716 tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
720 sw = tb_switch_find_by_link_depth(tb, link, depth);
722 tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
732 icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
734 const struct icm_fr_event_xdomain_connected *pkg =
735 (const struct icm_fr_event_xdomain_connected *)hdr;
736 struct tb_xdomain *xd;
737 struct tb_switch *sw;
742 * After an NVM upgrade, adding the root switch device fails
743 * because we initiated a reset. During that time ICM might still
744 * send XDomain connected messages, which we ignore here.
746 if (!tb->root_switch)
749 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
750 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
751 ICM_LINK_INFO_DEPTH_SHIFT;
753 if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
754 tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
758 route = get_route(pkg->local_route_hi, pkg->local_route_lo);
760 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
762 u8 xd_phy_port, phy_port;
764 xd_phy_port = phy_port_from_route(xd->route, xd->depth);
765 phy_port = phy_port_from_route(route, depth);
767 if (xd->depth == depth && xd_phy_port == phy_port) {
768 update_xdomain(xd, route, link);
774 * If we find an existing XDomain connection remove it
775 * now. We need to go through the login handshake and
776 * everything anyway to be able to re-establish the
777 * connection.
784 * Check if there already exists an XDomain in the same place
785 * as the new one; in that case remove it because it is
786 * most likely another host that got disconnected.
788 xd = tb_xdomain_find_by_link_depth(tb, link, depth);
792 dual_link = dual_link_from_link(link);
794 xd = tb_xdomain_find_by_link_depth(tb, dual_link,
803 * If the user disconnected a switch during suspend and
804 * connected another host to the same port, remove the switch
805 * now.
807 sw = get_switch_at_route(tb->root_switch, route);
811 sw = tb_switch_find_by_link_depth(tb, link, depth);
813 tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
818 add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
824 icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
826 const struct icm_fr_event_xdomain_disconnected *pkg =
827 (const struct icm_fr_event_xdomain_disconnected *)hdr;
828 struct tb_xdomain *xd;
831 * If the connection is through one or more devices, the
832 * XDomain device is removed along with them, so it is fine if we
833 * cannot find it here.
835 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
843 icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
844 size_t *nboot_acl, bool *rpm)
846 struct icm_tr_pkg_driver_ready_response reply;
847 struct icm_pkg_driver_ready request = {
848 .hdr.code = ICM_DRIVER_READY,
852 memset(&reply, 0, sizeof(reply));
853 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
859 *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
861 *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
862 ICM_TR_INFO_BOOT_ACL_SHIFT;
864 *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);
869 static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
871 struct icm_tr_pkg_approve_device request;
872 struct icm_tr_pkg_approve_device reply;
875 memset(&request, 0, sizeof(request));
876 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
877 request.hdr.code = ICM_APPROVE_DEVICE;
878 request.route_lo = sw->config.route_lo;
879 request.route_hi = sw->config.route_hi;
880 request.connection_id = sw->connection_id;
882 memset(&reply, 0, sizeof(reply));
883 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
884 1, ICM_APPROVE_TIMEOUT);
888 if (reply.hdr.flags & ICM_FLAGS_ERROR) {
889 tb_warn(tb, "PCIe tunnel creation failed\n");
896 static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
898 struct icm_tr_pkg_add_device_key_response reply;
899 struct icm_tr_pkg_add_device_key request;
902 memset(&request, 0, sizeof(request));
903 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
904 request.hdr.code = ICM_ADD_DEVICE_KEY;
905 request.route_lo = sw->config.route_lo;
906 request.route_hi = sw->config.route_hi;
907 request.connection_id = sw->connection_id;
908 memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
910 memset(&reply, 0, sizeof(reply));
911 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
916 if (reply.hdr.flags & ICM_FLAGS_ERROR) {
917 tb_warn(tb, "Adding key to switch failed\n");
924 static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
925 const u8 *challenge, u8 *response)
927 struct icm_tr_pkg_challenge_device_response reply;
928 struct icm_tr_pkg_challenge_device request;
931 memset(&request, 0, sizeof(request));
932 memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
933 request.hdr.code = ICM_CHALLENGE_DEVICE;
934 request.route_lo = sw->config.route_lo;
935 request.route_hi = sw->config.route_hi;
936 request.connection_id = sw->connection_id;
937 memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
939 memset(&reply, 0, sizeof(reply));
940 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
945 if (reply.hdr.flags & ICM_FLAGS_ERROR)
946 return -EKEYREJECTED;
947 if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
950 memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
955 static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
957 struct icm_tr_pkg_approve_xdomain_response reply;
958 struct icm_tr_pkg_approve_xdomain request;
961 memset(&request, 0, sizeof(request));
962 request.hdr.code = ICM_APPROVE_XDOMAIN;
963 request.route_hi = upper_32_bits(xd->route);
964 request.route_lo = lower_32_bits(xd->route);
965 request.transmit_path = xd->transmit_path;
966 request.transmit_ring = xd->transmit_ring;
967 request.receive_path = xd->receive_path;
968 request.receive_ring = xd->receive_ring;
969 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
971 memset(&reply, 0, sizeof(reply));
972 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
977 if (reply.hdr.flags & ICM_FLAGS_ERROR)
983 static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
986 struct icm_tr_pkg_disconnect_xdomain_response reply;
987 struct icm_tr_pkg_disconnect_xdomain request;
990 memset(&request, 0, sizeof(request));
991 request.hdr.code = ICM_DISCONNECT_XDOMAIN;
992 request.stage = stage;
993 request.route_hi = upper_32_bits(xd->route);
994 request.route_lo = lower_32_bits(xd->route);
995 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
997 memset(&reply, 0, sizeof(reply));
998 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1003 if (reply.hdr.flags & ICM_FLAGS_ERROR)
1009 static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
1013 ret = icm_tr_xdomain_tear_down(tb, xd, 1);
1017 usleep_range(10, 50);
1018 return icm_tr_xdomain_tear_down(tb, xd, 2);
1022 icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
1024 const struct icm_tr_event_device_connected *pkg =
1025 (const struct icm_tr_event_device_connected *)hdr;
1026 enum tb_security_level security_level;
1027 struct tb_switch *sw, *parent_sw;
1028 struct tb_xdomain *xd;
1029 bool authorized, boot;
1033 * Currently we don't use the QoS information coming with the
1034 * device connected message so simply ignore that extra
1035 * packet for now.
1037 if (pkg->hdr.packet_id)
1041 * After an NVM upgrade, adding the root switch device fails
1042 * because we initiated a reset. During that time ICM might still
1043 * send device connected messages, which we ignore here.
1045 if (!tb->root_switch)
1048 route = get_route(pkg->route_hi, pkg->route_lo);
1049 authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
1050 security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
1051 ICM_FLAGS_SLEVEL_SHIFT;
1052 boot = pkg->link_info & ICM_LINK_INFO_BOOT;
1054 if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
1055 tb_info(tb, "switch at %llx was rejected by ICM firmware because the topology limit was exceeded\n",
1060 sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
1062 /* Update the switch if it is still in the same place */
1063 if (tb_route(sw) == route && !!sw->authorized == authorized) {
1064 parent_sw = tb_to_switch(sw->dev.parent);
1065 update_switch(parent_sw, sw, route, pkg->connection_id,
1075 /* Another switch with the same address */
1076 sw = tb_switch_find_by_route(tb, route);
1082 /* XDomain connection with the same address */
1083 xd = tb_xdomain_find_by_route(tb, route);
1089 parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
1091 tb_err(tb, "failed to find parent switch for %llx\n", route);
1095 add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
1096 sizeof(pkg->ep_name), pkg->connection_id,
1097 0, 0, 0, security_level, authorized, boot);
1099 tb_switch_put(parent_sw);
1103 icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
1105 const struct icm_tr_event_device_disconnected *pkg =
1106 (const struct icm_tr_event_device_disconnected *)hdr;
1107 struct tb_switch *sw;
1110 route = get_route(pkg->route_hi, pkg->route_lo);
1112 sw = tb_switch_find_by_route(tb, route);
1114 tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
1123 icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
1125 const struct icm_tr_event_xdomain_connected *pkg =
1126 (const struct icm_tr_event_xdomain_connected *)hdr;
1127 struct tb_xdomain *xd;
1128 struct tb_switch *sw;
1131 if (!tb->root_switch)
1134 route = get_route(pkg->local_route_hi, pkg->local_route_lo);
1136 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
1138 if (xd->route == route) {
1139 update_xdomain(xd, route, 0);
1148 /* An existing XDomain with the same address */
1149 xd = tb_xdomain_find_by_route(tb, route);
1156 * If the user disconnected a switch during suspend and
1157 * connected another host to the same port, remove the switch
1158 * now.
1160 sw = get_switch_at_route(tb->root_switch, route);
1164 sw = tb_switch_find_by_route(tb, get_parent_route(route));
1166 tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
1170 add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
1175 icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
1177 const struct icm_tr_event_xdomain_disconnected *pkg =
1178 (const struct icm_tr_event_xdomain_disconnected *)hdr;
1179 struct tb_xdomain *xd;
1182 route = get_route(pkg->route_hi, pkg->route_lo);
1184 xd = tb_xdomain_find_by_route(tb, route);
1191 static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
1193 struct pci_dev *parent;
1195 parent = pci_upstream_bridge(pdev);
1197 if (!pci_is_pcie(parent))
1199 if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
1201 parent = pci_upstream_bridge(parent);
1207 switch (parent->device) {
1208 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
1209 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
1210 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
1211 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
1212 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
1219 static bool icm_ar_is_supported(struct tb *tb)
1221 struct pci_dev *upstream_port;
1222 struct icm *icm = tb_priv(tb);
1225 * Starting from Alpine Ridge we can use ICM on Apple machines
1226 * as well. We just need to reset and re-enable it first.
1228 if (!x86_apple_machine)
1232 * Find the upstream PCIe port in case we need to perform the
1233 * reset through its vendor-specific registers.
1235 upstream_port = get_upstream_port(tb->nhi->pdev);
1236 if (upstream_port) {
1239 cap = pci_find_ext_capability(upstream_port,
1240 PCI_EXT_CAP_ID_VNDR);
1242 icm->upstream_port = upstream_port;
1252 static int icm_ar_get_mode(struct tb *tb)
1254 struct tb_nhi *nhi = tb->nhi;
1259 val = ioread32(nhi->iobase + REG_FW_STS);
1260 if (val & REG_FW_STS_NVM_AUTH_DONE)
1263 } while (--retries);
1266 dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
1270 return nhi_mailbox_mode(nhi);
1274 icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
1275 size_t *nboot_acl, bool *rpm)
1277 struct icm_ar_pkg_driver_ready_response reply;
1278 struct icm_pkg_driver_ready request = {
1279 .hdr.code = ICM_DRIVER_READY,
1283 memset(&reply, 0, sizeof(reply));
1284 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1290 *security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
1291 if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
1292 *nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
1293 ICM_AR_INFO_BOOT_ACL_SHIFT;
1295 *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);
1300 static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
1302 struct icm_ar_pkg_get_route_response reply;
1303 struct icm_ar_pkg_get_route request = {
1304 .hdr = { .code = ICM_GET_ROUTE },
1305 .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
1309 memset(&reply, 0, sizeof(reply));
1310 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1315 if (reply.hdr.flags & ICM_FLAGS_ERROR)
1318 *route = get_route(reply.route_hi, reply.route_lo);
1322 static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
1324 struct icm_ar_pkg_preboot_acl_response reply;
1325 struct icm_ar_pkg_preboot_acl request = {
1326 .hdr = { .code = ICM_PREBOOT_ACL },
1330 memset(&reply, 0, sizeof(reply));
1331 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1336 if (reply.hdr.flags & ICM_FLAGS_ERROR)
1339 for (i = 0; i < nuuids; i++) {
1340 u32 *uuid = (u32 *)&uuids[i];
1342 uuid[0] = reply.acl[i].uuid_lo;
1343 uuid[1] = reply.acl[i].uuid_hi;
1345 if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
1346 /* Map empty entries to null UUID */
1349 } else if (uuid[0] != 0 || uuid[1] != 0) {
1350 /* Upper two DWs are always all ones */
1351 uuid[2] = 0xffffffff;
1352 uuid[3] = 0xffffffff;
1359 static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
1362 struct icm_ar_pkg_preboot_acl_response reply;
1363 struct icm_ar_pkg_preboot_acl request = {
1365 .code = ICM_PREBOOT_ACL,
1366 .flags = ICM_FLAGS_WRITE,
1371 for (i = 0; i < nuuids; i++) {
1372 const u32 *uuid = (const u32 *)&uuids[i];
1374 if (uuid_is_null(&uuids[i])) {
1376 * Map a null UUID to the empty (all ones) entries
1377 * for ICM.
1379 request.acl[i].uuid_lo = 0xffffffff;
1380 request.acl[i].uuid_hi = 0xffffffff;
1382 /* Two high DWs need to be set to all ones */
1383 if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
1386 request.acl[i].uuid_lo = uuid[0];
1387 request.acl[i].uuid_hi = uuid[1];
1391 memset(&reply, 0, sizeof(reply));
1392 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1397 if (reply.hdr.flags & ICM_FLAGS_ERROR)
1403 static void icm_handle_notification(struct work_struct *work)
1405 struct icm_notification *n = container_of(work, typeof(*n), work);
1406 struct tb *tb = n->tb;
1407 struct icm *icm = tb_priv(tb);
1409 mutex_lock(&tb->lock);
1411 switch (n->pkg->code) {
1412 case ICM_EVENT_DEVICE_CONNECTED:
1413 icm->device_connected(tb, n->pkg);
1415 case ICM_EVENT_DEVICE_DISCONNECTED:
1416 icm->device_disconnected(tb, n->pkg);
1418 case ICM_EVENT_XDOMAIN_CONNECTED:
1419 icm->xdomain_connected(tb, n->pkg);
1421 case ICM_EVENT_XDOMAIN_DISCONNECTED:
1422 icm->xdomain_disconnected(tb, n->pkg);
1426 mutex_unlock(&tb->lock);
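/*
 * ICM notifications are handled in a work item on the domain workqueue:
 * the packet is duplicated first so that handling does not block the
 * receive path.
 */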
1432 static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1433 const void *buf, size_t size)
1435 struct icm_notification *n;
1437 n = kmalloc(sizeof(*n), GFP_KERNEL);
1441 INIT_WORK(&n->work, icm_handle_notification);
1442 n->pkg = kmemdup(buf, size, GFP_KERNEL);
1445 queue_work(tb->wq, &n->work);
1449 __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
1450 size_t *nboot_acl, bool *rpm)
1452 struct icm *icm = tb_priv(tb);
1453 unsigned int retries = 50;
1456 ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
1458 tb_err(tb, "failed to send driver ready to ICM\n");
1463 * Wait here until the switch config space is accessible so
1464 * that we can read the root switch config successfully.
1467 struct tb_cfg_result res;
1470 res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
1476 } while (--retries);
1478 tb_err(tb, "failed to read root switch config space, giving up\n");
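/* Poll PCIE2CIO_CMD until the hardware clears the START bit or flags a timeout */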
1482 static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
1484 unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
1488 pci_read_config_dword(icm->upstream_port,
1489 icm->vnd_cap + PCIE2CIO_CMD, &cmd);
1490 if (!(cmd & PCIE2CIO_CMD_START)) {
1491 if (cmd & PCIE2CIO_CMD_TIMEOUT)
1497 } while (time_before(jiffies, end));
1502 static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
1503 unsigned int port, unsigned int index, u32 *data)
1505 struct pci_dev *pdev = icm->upstream_port;
1506 int ret, vnd_cap = icm->vnd_cap;
1510 cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
1511 cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
1512 cmd |= PCIE2CIO_CMD_START;
1513 pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
1515 ret = pci2cio_wait_completion(icm, 5000);
1519 pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
1523 static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
1524 unsigned int port, unsigned int index, u32 data)
1526 struct pci_dev *pdev = icm->upstream_port;
1527 int vnd_cap = icm->vnd_cap;
1530 pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);
1533 cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
1534 cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
1535 cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
1536 pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
1538 return pci2cio_wait_completion(icm, 5000);
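/*
 * Resetting the firmware goes through the PCIe2CIO mailbox, so it is
 * only possible when the upstream bridge with the vendor-defined
 * capability was found during probe.
 */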
1541 static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
1543 struct icm *icm = tb_priv(tb);
1546 if (!icm->upstream_port)
1549 /* Make the ARC wait for the CIO reset event to happen */
1550 val = ioread32(nhi->iobase + REG_FW_STS);
1551 val |= REG_FW_STS_CIO_RESET_REQ;
1552 iowrite32(val, nhi->iobase + REG_FW_STS);
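	/* Re-start ARC */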
1555 val = ioread32(nhi->iobase + REG_FW_STS);
1556 val |= REG_FW_STS_ICM_EN_INVERT;
1557 val |= REG_FW_STS_ICM_EN_CPU;
1558 iowrite32(val, nhi->iobase + REG_FW_STS);
1560 /* Trigger CIO reset now */
1561 return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
1564 static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
1566 unsigned int retries = 10;
1570 /* Check if the ICM firmware is already running */
1571 val = ioread32(nhi->iobase + REG_FW_STS);
1572 if (val & REG_FW_STS_ICM_EN)
1575 dev_info(&nhi->pdev->dev, "starting ICM firmware\n");
1577 ret = icm_firmware_reset(tb, nhi);
1581 /* Wait until the ICM firmware tells us it is up and running */
1583 /* Check that the ICM firmware is running */
1584 val = ioread32(nhi->iobase + REG_FW_STS);
1585 if (val & REG_FW_STS_NVM_AUTH_DONE)
1589 } while (--retries);
1594 static int icm_reset_phy_port(struct tb *tb, int phy_port)
1596 struct icm *icm = tb_priv(tb);
1602 if (!icm->upstream_port)
1614 * Read link status of both null ports belonging to a single
1615 * physical port.
1617 ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
1620 ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1624 state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
1625 state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
1626 state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
1627 state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
1629 /* If they are both up we need to reset them now */
1630 if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
1633 val0 |= PHY_PORT_CS1_LINK_DISABLE;
1634 ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1638 val1 |= PHY_PORT_CS1_LINK_DISABLE;
1639 ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1643 /* Wait a bit and then re-enable both ports */
1644 usleep_range(10, 100);
1646 ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
1649 ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1653 val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
1654 ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1658 val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
1659 return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1662 static int icm_firmware_init(struct tb *tb)
1664 struct icm *icm = tb_priv(tb);
1665 struct tb_nhi *nhi = tb->nhi;
1668 ret = icm_firmware_start(tb, nhi);
1670 dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
1674 if (icm->get_mode) {
1675 ret = icm->get_mode(tb);
1678 case NHI_FW_SAFE_MODE:
1679 icm->safe_mode = true;
1682 case NHI_FW_CM_MODE:
1683 /* Ask ICM to accept all Thunderbolt devices */
1684 nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
1691 tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
1697 * Reset both physical ports if there is anything connected to
1698 * them already.
1700 ret = icm_reset_phy_port(tb, 0);
1702 dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
1703 ret = icm_reset_phy_port(tb, 1);
1705 dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
1710 static int icm_driver_ready(struct tb *tb)
1712 struct icm *icm = tb_priv(tb);
1715 ret = icm_firmware_init(tb);
1719 if (icm->safe_mode) {
1720 tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
1721 tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
1722 tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
1726 ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
1732 * Make sure the number of supported preboot ACL entries matches
1733 * what we expect or disable the whole feature.
1735 if (tb->nboot_acl > icm->max_boot_acl)
1741 static int icm_suspend(struct tb *tb)
1743 struct icm *icm = tb_priv(tb);
1745 if (icm->save_devices)
1746 icm->save_devices(tb);
1748 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
1753 * Mark all switches (except the root switch) below this one unplugged. ICM
1754 * firmware will send us an updated list of switches after we have sent
1755 * it the driver ready command. If a switch is not in that list it will be
1756 * removed when we perform the rescan.
1758 static void icm_unplug_children(struct tb_switch *sw)
1763 sw->is_unplugged = true;
1765 for (i = 1; i <= sw->config.max_port_number; i++) {
1766 struct tb_port *port = &sw->ports[i];
1768 if (tb_is_upstream_port(port))
1770 if (port->xdomain) {
1771 port->xdomain->is_unplugged = true;
1777 icm_unplug_children(port->remote->sw);
1781 static void icm_free_unplugged_children(struct tb_switch *sw)
1785 for (i = 1; i <= sw->config.max_port_number; i++) {
1786 struct tb_port *port = &sw->ports[i];
1788 if (tb_is_upstream_port(port))
1791 if (port->xdomain && port->xdomain->is_unplugged) {
1792 tb_xdomain_remove(port->xdomain);
1793 port->xdomain = NULL;
1800 if (port->remote->sw->is_unplugged) {
1801 tb_switch_remove(port->remote->sw);
1802 port->remote = NULL;
1804 icm_free_unplugged_children(port->remote->sw);
1809 static void icm_rescan_work(struct work_struct *work)
1811 struct icm *icm = container_of(work, struct icm, rescan_work.work);
1812 struct tb *tb = icm_to_tb(icm);
1814 mutex_lock(&tb->lock);
1815 if (tb->root_switch)
1816 icm_free_unplugged_children(tb->root_switch);
1817 mutex_unlock(&tb->lock);
1820 static void icm_complete(struct tb *tb)
1822 struct icm *icm = tb_priv(tb);
1824 if (tb->nhi->going_away)
1827 icm_unplug_children(tb->root_switch);
1830 * Now that all existing children are resumed, start events
1831 * from ICM to get the updated status.
1833 __icm_driver_ready(tb, NULL, NULL, NULL);
1836 * We do not get notifications of devices that have been
1837 * unplugged during suspend so schedule rescan to clean them up
1838 * if any.
1840 queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
1843 static int icm_runtime_suspend(struct tb *tb)
1845 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
1849 static int icm_runtime_resume(struct tb *tb)
1852 * We can reuse the same resume functionality as with system
1853 * suspend.
1859 static int icm_start(struct tb *tb)
1861 struct icm *icm = tb_priv(tb);
1865 tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
1867 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1868 if (!tb->root_switch)
1872 * NVM upgrade has not been tested on Apple systems and they
1873 * don't provide images publicly either. To be on the safe side,
1874 * prevent root switch NVM upgrade on Macs for now.
1876 tb->root_switch->no_nvm_upgrade = x86_apple_machine;
1877 tb->root_switch->rpm = icm->rpm;
1879 ret = tb_switch_add(tb->root_switch);
1881 tb_switch_put(tb->root_switch);
1882 tb->root_switch = NULL;
1888 static void icm_stop(struct tb *tb)
1890 struct icm *icm = tb_priv(tb);
1892 cancel_delayed_work(&icm->rescan_work);
1893 tb_switch_remove(tb->root_switch);
1894 tb->root_switch = NULL;
1895 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
1898 static int icm_disconnect_pcie_paths(struct tb *tb)
1900 return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
1904 static const struct tb_cm_ops icm_fr_ops = {
1905 .driver_ready = icm_driver_ready,
1908 .suspend = icm_suspend,
1909 .complete = icm_complete,
1910 .handle_event = icm_handle_event,
1911 .approve_switch = icm_fr_approve_switch,
1912 .add_switch_key = icm_fr_add_switch_key,
1913 .challenge_switch_key = icm_fr_challenge_switch_key,
1914 .disconnect_pcie_paths = icm_disconnect_pcie_paths,
1915 .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
1916 .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
1920 static const struct tb_cm_ops icm_ar_ops = {
1921 .driver_ready = icm_driver_ready,
1924 .suspend = icm_suspend,
1925 .complete = icm_complete,
1926 .runtime_suspend = icm_runtime_suspend,
1927 .runtime_resume = icm_runtime_resume,
1928 .handle_event = icm_handle_event,
1929 .get_boot_acl = icm_ar_get_boot_acl,
1930 .set_boot_acl = icm_ar_set_boot_acl,
1931 .approve_switch = icm_fr_approve_switch,
1932 .add_switch_key = icm_fr_add_switch_key,
1933 .challenge_switch_key = icm_fr_challenge_switch_key,
1934 .disconnect_pcie_paths = icm_disconnect_pcie_paths,
1935 .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
1936 .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
1940 static const struct tb_cm_ops icm_tr_ops = {
1941 .driver_ready = icm_driver_ready,
1944 .suspend = icm_suspend,
1945 .complete = icm_complete,
1946 .runtime_suspend = icm_runtime_suspend,
1947 .runtime_resume = icm_runtime_resume,
1948 .handle_event = icm_handle_event,
1949 .get_boot_acl = icm_ar_get_boot_acl,
1950 .set_boot_acl = icm_ar_set_boot_acl,
1951 .approve_switch = icm_tr_approve_switch,
1952 .add_switch_key = icm_tr_add_switch_key,
1953 .challenge_switch_key = icm_tr_challenge_switch_key,
1954 .disconnect_pcie_paths = icm_disconnect_pcie_paths,
1955 .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
1956 .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
1959 struct tb *icm_probe(struct tb_nhi *nhi)
1964 tb = tb_domain_alloc(nhi, sizeof(struct icm));
1969 INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
1970 mutex_init(&icm->request_lock);
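/*
 * The NHI PCI device ID tells which controller generation this is
 * (Falcon Ridge, Alpine Ridge or Titan Ridge) and therefore which
 * ICM message format and operations to use.
 */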
1972 switch (nhi->pdev->device) {
1973 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
1974 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
1975 icm->is_supported = icm_fr_is_supported;
1976 icm->get_route = icm_fr_get_route;
1977 icm->save_devices = icm_fr_save_devices;
1978 icm->driver_ready = icm_fr_driver_ready;
1979 icm->device_connected = icm_fr_device_connected;
1980 icm->device_disconnected = icm_fr_device_disconnected;
1981 icm->xdomain_connected = icm_fr_xdomain_connected;
1982 icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
1983 tb->cm_ops = &icm_fr_ops;
1986 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
1987 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
1988 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
1989 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
1990 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
1991 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
1992 icm->is_supported = icm_ar_is_supported;
1993 icm->get_mode = icm_ar_get_mode;
1994 icm->get_route = icm_ar_get_route;
1995 icm->save_devices = icm_fr_save_devices;
1996 icm->driver_ready = icm_ar_driver_ready;
1997 icm->device_connected = icm_fr_device_connected;
1998 icm->device_disconnected = icm_fr_device_disconnected;
1999 icm->xdomain_connected = icm_fr_xdomain_connected;
2000 icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
2001 tb->cm_ops = &icm_ar_ops;
2004 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
2005 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
2006 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
2007 icm->is_supported = icm_ar_is_supported;
2008 icm->get_mode = icm_ar_get_mode;
2009 icm->driver_ready = icm_tr_driver_ready;
2010 icm->device_connected = icm_tr_device_connected;
2011 icm->device_disconnected = icm_tr_device_disconnected;
2012 icm->xdomain_connected = icm_tr_xdomain_connected;
2013 icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2014 tb->cm_ops = &icm_tr_ops;
2018 if (!icm->is_supported || !icm->is_supported(tb)) {
2019 dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");