// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

#define TB_DMA_PATH_OUT			0
#define TB_DMA_PATH_IN			1

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_names[__tunnel->type],                  \
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static unsigned int tb_initial_credits(const struct tb_switch *sw)
{
	/* If the path is complete sw is not NULL */
	if (sw) {
		/* More credits for faster link */
		switch (sw->link_speed * sw->link_width) {
		case 40:
			return 32;
		case 20:
			return 24;
		}
	}

	return 16;
}

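/*
 * Worked example (illustrative): a bonded 20 Gb/s link (link_speed 20,
 * link_width 2) evaluates to 40 above and gets 32 initial credits,
 * while an unbonded 10 Gb/s link (10 * 1) falls back to the default 16.
 */
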
static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	path->hops[1].initial_credits =
		tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_UP] = path;

	return tunnel;
}

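/*
 * Illustrative usage sketch, not the actual caller (the real connection
 * manager code lives outside this file and may differ in details):
 *
 *	tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 */
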
static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
	int timeout = 10;
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_switch_is_titan_ridge(in->sw) ||
	    !tb_switch_is_titan_ridge(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(10, 100);
	} while (timeout--);

	return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
		/* Fallthrough */
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		/* Fallthrough */
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}

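/*
 * Worked example (illustrative): HBR2 x4 is 5400 Mb/s per lane, so
 * tb_dp_bandwidth(5400, 4) = 5400 * 4 * 8 / 10 = 17280 Mb/s, matching
 * the corresponding entry in the dp_bw[] table below.
 */
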
static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}

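/*
 * Worked example (illustrative): if both adapters support HBR3 x4
 * (25920 Mb/s) but only 8640 Mb/s is available, the walk above skips
 * the 25920, 17280 and 12960 Mb/s entries and settles on 2700 Mb/s x4
 * (8640 Mb/s), the first combination that fits.
 */
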
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (tunnel->max_bw && bw > tunnel->max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(tunnel->max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 val, rate = 0, lanes = 0;
	int ret;

	if (tb_switch_is_titan_ridge(sw)) {
		int timeout = 10;

		/*
		 * Wait for DPRX done. Normally it should be already set
		 * for active tunnel.
		 */
		do {
			ret = tb_port_read(in, &val, TB_CFG_PORT,
					   in->cap_adap + DP_COMMON_CAP, 1);
			if (ret)
				return ret;

			if (val & DP_COMMON_CAP_DPRX_DONE) {
				rate = tb_dp_cap_get_rate(val);
				lanes = tb_dp_cap_get_lanes(val);
				break;
			}
			msleep(250);
		} while (timeout--);

		if (!timeout)
			return -ETIMEDOUT;
	} else if (sw->generation >= 2) {
		/*
		 * Read from the copied remote cap so that we take into
		 * account if capabilities were reduced during exchange.
		 */
		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_REMOTE_CAP, 1);
		if (ret)
			return ret;

		rate = tb_dp_cap_get_rate(val);
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		return 0;
	}

	return tb_dp_bandwidth(rate, lanes);
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
	int i;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = 1;
}

static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	if (discover) {
		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	} else {
		u32 max_credits;

		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
		/* Leave some credits for AUX path */
		path->nfc_credits = min(max_credits - 2, 12U);
	}
}

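/*
 * Worked example (illustrative): with 16 total buffers advertised by
 * the adapter, min(16 - 2, 12U) caps the video path at 12
 * non-flow-controlled credits; the "- 2" keeps buffers free for the
 * AUX paths.
 */
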
/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video");
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_bw: Maximum available bandwidth for the DP tunnel (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_bw)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_bw = max_bw;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     1, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, false);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static u32 tb_dma_credits(struct tb_port *nhi)
{
	u32 max_credits;

	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
	return min(max_credits, 13U);
}

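/*
 * Worked example (illustrative): an NHI advertising 20 total buffers
 * yields min(20, 13U) = 13 credits, whereas one with only 10 buffers
 * gets all 10.
 */
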
static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
{
	struct tb_port *nhi = tunnel->src_port;
	u32 credits;

	credits = active ? tb_dma_credits(nhi) : 0;
	return tb_port_set_initial_credits(nhi, credits);
}

static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
			     unsigned int efc, u32 credits)
{
	int i;

	path->egress_fc_enable = efc;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = isb;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = credits;
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain
 * @transmit_path: HopID used for transmitting packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain
 * @receive_path: HopID used for receiving packets
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_ring,
				      int transmit_path, int receive_ring,
				      int receive_path)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	u32 credits;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_dma_activate;
	tunnel->src_port = nhi;
	tunnel->dst_port = dst;

	credits = tb_dma_credits(nhi);

	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
			 credits);
	tunnel->paths[TB_DMA_PATH_IN] = path;

	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
	tunnel->paths[TB_DMA_PATH_OUT] = path;

	return tunnel;
}

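/*
 * Illustrative usage sketch for an XDomain connection; the ring and
 * HopID numbers below are hypothetical and the real caller may differ:
 *
 *	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, 1, 8, 1, 8);
 *	if (!tunnel)
 *		return -ENOMEM;
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 */
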
/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_switch_on_path() - Does the tunnel go through switch
 * @tunnel: Tunnel to check
 * @sw: Switch to check
 *
 * Returns true if @tunnel goes through @sw (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
			      const struct tb_switch *sw)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		if (tb_path_switch_on_path(tunnel->paths[i], sw))
			return true;
	}

	return false;
}

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 *
 * Returns bandwidth currently consumed by @tunnel and %0 if the @tunnel
 * is not active or does not consume bandwidth.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->consumed_bandwidth) {
		int ret = tunnel->consumed_bandwidth(tunnel);

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d Mb/s\n", ret);
		return ret;
	}

	return 0;
}