2 * net/tipc/link.c: TIPC link code
4 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
42 #include "name_distr.h"
48 #include <linux/pkt_sched.h>
69 u32 link_congs; /* # port sends blocked by congestion */
72 u32 max_queue_sz; /* send queue size high water mark */
73 u32 accu_queue_sz; /* used for send queue size profiling */
74 u32 queue_sz_counts; /* used for send queue size profiling */
75 u32 msg_length_counts; /* used for message length profiling */
76 u32 msg_lengths_total; /* used for message length profiling */
77 u32 msg_length_profile[7]; /* used for msg. length profiling */
81 * struct tipc_link - TIPC link data structure
82 * @addr: network address of link's peer node
83 * @name: link name character string
84 * @media_addr: media address to use when sending messages over link
86 * @net: pointer to namespace struct
87 * @refcnt: reference counter for permanent references (owner node & timer)
88 * @peer_session: link session # being used by peer end of link
89 * @peer_bearer_id: bearer id used by link's peer endpoint
90 * @bearer_id: local bearer id used by link
91 * @tolerance: minimum link continuity loss needed to reset link [in ms]
92 * @abort_limit: # of unacknowledged continuity probes needed to reset link
93 * @state: current state of link FSM
94 * @peer_caps: bitmap describing capabilities of peer node
95 * @silent_intv_cnt: # of timer intervals without any reception from peer
96 * @proto_msg: template for control messages generated by link
97 * @pmsg: convenience pointer to "proto_msg" field
98 * @priority: current link priority
99 * @net_plane: current link network plane ('A' through 'H')
100 * @mon_state: cookie with information needed by link monitor
101 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
102 * @exp_msg_count: # of tunnelled messages expected during link changeover
103 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
104 * @mtu: current maximum packet size for this link
105 * @advertised_mtu: advertised own mtu when link is being established
106 * @transmitq: queue for sent, non-acked messages
107 * @backlogq: queue for messages waiting to be sent
108 * @snt_nxt: next sequence number to use for outbound messages
109 * @prev_from: sequence number of most recent retransmission request
110 * @stale_limit: time when repeated identical retransmits must force link reset
111 * @ackers: # of peers that need to ack each packet before it can be released
112 * @acked: seqno of last packet acked by a certain peer. Used for broadcast.
113 * @rcv_nxt: next sequence number to expect for inbound messages
114 * @deferred_queue: deferred queue of saved out-of-sequence broadcast messages received from node
115 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
116 * @inputq: buffer queue for messages to be delivered upwards
117 * @namedq: buffer queue for name table messages to be delivered upwards
118 * @next_out: ptr to first unsent outbound message in queue
119 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
120 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
121 * @reasm_buf: head of partially reassembled inbound message fragments
122 * @bc_rcvr: marks that this is a broadcast receiver link
123 * @stats: collects statistics regarding link activity
127 char name[TIPC_MAX_LINK_NAME];
130 /* Management and link supervision data */
144 char if_name[TIPC_MAX_IF_NAME];
147 struct tipc_mon_state mon_state;
152 struct sk_buff *failover_reasm_skb;
153 struct sk_buff_head failover_deferdq;
155 /* Max packet negotiation */
160 struct sk_buff_head transmq;
161 struct sk_buff_head backlogq;
169 unsigned long stale_limit;
174 struct sk_buff_head deferdq;
175 struct sk_buff_head *inputq;
176 struct sk_buff_head *namedq;
178 /* Congestion handling */
179 struct sk_buff_head wakeupq;
181 /* Fragmentation/reassembly */
182 struct sk_buff *reasm_buf;
187 struct tipc_link *bc_rcvlink;
188 struct tipc_link *bc_sndlink;
193 struct tipc_stats stats;
197 * Error message prefixes
199 static const char *link_co_err = "Link tunneling error, ";
200 static const char *link_rst_msg = "Resetting link ";
202 /* Send states for broadcast NACKs
205 BC_NACK_SND_CONDITIONAL,
206 BC_NACK_SND_UNCONDITIONAL,
207 BC_NACK_SND_SUPPRESS,
210 #define TIPC_BC_RETR_LIM (jiffies + msecs_to_jiffies(10))
211 #define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
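/* Note added for illustration (not in the original source): both values above
 * are stamped into TIPC_SKB_CB(skb)->nxt_retr when a packet is retransmitted
 * (and, for the broadcast send link, already on first transmission), and the
 * retransmit paths skip a packet while jiffies is still before that stamp.
 * The net effect is that the same packet is retransmitted at most once per
 * 10 ms on the broadcast link and at most once per 1 ms on a unicast link.
 */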
214 * Interval between NACKs when packets arrive out of order
216 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
221 LINK_ESTABLISHED = 0xe,
222 LINK_ESTABLISHING = 0xe << 4,
223 LINK_RESET = 0x1 << 8,
224 LINK_RESETTING = 0x2 << 12,
225 LINK_PEER_RESET = 0xd << 16,
226 LINK_FAILINGOVER = 0xf << 20,
227 LINK_SYNCHING = 0xc << 24
230 /* Link FSM state checking routines
232 static int link_is_up(struct tipc_link *l)
234 return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
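/* Note added for illustration (not in the original source): each link FSM
 * state above is encoded in its own nibble, so a set of states can be tested
 * with a single bitwise AND against an OR-ed mask, as link_is_up() does here
 * and as tipc_link_is_reset() and tipc_link_is_blocked() do below.
 */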
237 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
238 struct sk_buff_head *xmitq);
239 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
240 bool probe_reply, u16 rcvgap,
241 int tolerance, int priority,
242 struct sk_buff_head *xmitq);
243 static void link_print(struct tipc_link *l, const char *str);
244 static int tipc_link_build_nack_msg(struct tipc_link *l,
245 struct sk_buff_head *xmitq);
246 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
247 struct sk_buff_head *xmitq);
248 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
249 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data);
250 static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
251 struct tipc_gap_ack_blks *ga,
252 struct sk_buff_head *xmitq);
255 * Simple non-static link routines (i.e. referenced outside this file)
257 bool tipc_link_is_up(struct tipc_link *l)
259 return link_is_up(l);
262 bool tipc_link_peer_is_down(struct tipc_link *l)
264 return l->state == LINK_PEER_RESET;
267 bool tipc_link_is_reset(struct tipc_link *l)
269 return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
272 bool tipc_link_is_establishing(struct tipc_link *l)
274 return l->state == LINK_ESTABLISHING;
277 bool tipc_link_is_synching(struct tipc_link *l)
279 return l->state == LINK_SYNCHING;
282 bool tipc_link_is_failingover(struct tipc_link *l)
284 return l->state == LINK_FAILINGOVER;
287 bool tipc_link_is_blocked(struct tipc_link *l)
289 return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
292 static bool link_is_bc_sndlink(struct tipc_link *l)
294 return !l->bc_sndlink;
297 static bool link_is_bc_rcvlink(struct tipc_link *l)
299 return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
302 void tipc_link_set_active(struct tipc_link *l, bool active)
307 u32 tipc_link_id(struct tipc_link *l)
309 return l->peer_bearer_id << 16 | l->bearer_id;
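/* Illustrative helpers (added; the names are hypothetical and not part of
 * the original file): the composite id returned above packs the peer bearer
 * id into the upper 16 bits and the local bearer id into the lower 16 bits,
 * so it can be decomposed again like this:
 */
static inline u32 tipc_link_id_peer_bearer(u32 link_id)
{
	return link_id >> 16;		/* peer bearer id */
}

static inline u32 tipc_link_id_local_bearer(u32 link_id)
{
	return link_id & 0xffff;	/* local bearer id */
}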
312 int tipc_link_window(struct tipc_link *l)
317 int tipc_link_prio(struct tipc_link *l)
322 unsigned long tipc_link_tolerance(struct tipc_link *l)
327 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
332 char tipc_link_plane(struct tipc_link *l)
337 void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
339 l->peer_caps = capabilities;
342 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
343 struct tipc_link *uc_l,
344 struct sk_buff_head *xmitq)
346 struct tipc_link *rcv_l = uc_l->bc_rcvlink;
349 rcv_l->acked = snd_l->snd_nxt - 1;
350 snd_l->state = LINK_ESTABLISHED;
351 tipc_link_build_bc_init_msg(uc_l, xmitq);
354 void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
355 struct tipc_link *rcv_l,
356 struct sk_buff_head *xmitq)
358 u16 ack = snd_l->snd_nxt - 1;
361 rcv_l->bc_peer_is_up = true;
362 rcv_l->state = LINK_ESTABLISHED;
363 tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
364 trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
365 tipc_link_reset(rcv_l);
366 rcv_l->state = LINK_RESET;
367 if (!snd_l->ackers) {
368 trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
369 tipc_link_reset(snd_l);
370 snd_l->state = LINK_RESET;
371 __skb_queue_purge(xmitq);
375 int tipc_link_bc_peers(struct tipc_link *l)
380 static u16 link_bc_rcv_gap(struct tipc_link *l)
382 struct sk_buff *skb = skb_peek(&l->deferdq);
385 if (more(l->snd_nxt, l->rcv_nxt))
386 gap = l->snd_nxt - l->rcv_nxt;
388 gap = buf_seqno(skb) - l->rcv_nxt;
392 void tipc_link_set_mtu(struct tipc_link *l, int mtu)
397 int tipc_link_mtu(struct tipc_link *l)
402 u16 tipc_link_rcv_nxt(struct tipc_link *l)
407 u16 tipc_link_acked(struct tipc_link *l)
412 char *tipc_link_name(struct tipc_link *l)
417 u32 tipc_link_state(struct tipc_link *l)
423 * tipc_link_create - create a new link
424 * @n: pointer to associated node
425 * @if_name: associated interface name
426 * @bearer_id: id (index) of associated bearer
427 * @tolerance: link tolerance to be used by link
428 * @net_plane: network plane (A,B,C..) this link belongs to
429 * @mtu: mtu to be advertised by link
430 * @priority: priority to be used by link
431 * @window: send window to be used by link
432 * @session: session to be used by link
433 * @ownnode: identity of own node
434 * @peer: node id of peer node
435 * @peer_caps: bitmap describing peer node capabilities
436 * @bc_sndlink: the namespace global link used for broadcast sending
437 * @bc_rcvlink: the peer specific link used for broadcast reception
438 * @inputq: queue to put messages ready for delivery
439 * @namedq: queue to put binding table update messages ready for delivery
440 * @link: return value, pointer to put the created link
442 * Returns true if link was created, otherwise false
444 bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
445 int tolerance, char net_plane, u32 mtu, int priority,
446 int window, u32 session, u32 self,
447 u32 peer, u8 *peer_id, u16 peer_caps,
448 struct tipc_link *bc_sndlink,
449 struct tipc_link *bc_rcvlink,
450 struct sk_buff_head *inputq,
451 struct sk_buff_head *namedq,
452 struct tipc_link **link)
454 char peer_str[NODE_ID_STR_LEN] = {0,};
455 char self_str[NODE_ID_STR_LEN] = {0,};
458 l = kzalloc(sizeof(*l), GFP_ATOMIC);
462 l->session = session;
464 /* Set link name for unicast links only */
466 tipc_nodeid2string(self_str, tipc_own_id(net));
467 if (strlen(self_str) > 16)
468 sprintf(self_str, "%x", self);
469 tipc_nodeid2string(peer_str, peer_id);
470 if (strlen(peer_str) > 16)
471 sprintf(peer_str, "%x", peer);
473 /* Peer i/f name will be completed by reset/activate message */
474 snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
475 self_str, if_name, peer_str);
477 strcpy(l->if_name, if_name);
479 l->peer_caps = peer_caps;
481 l->in_session = false;
482 l->bearer_id = bearer_id;
483 l->tolerance = tolerance;
485 bc_rcvlink->tolerance = tolerance;
486 l->net_plane = net_plane;
487 l->advertised_mtu = mtu;
489 l->priority = priority;
490 tipc_link_set_queue_limits(l, window);
492 l->bc_sndlink = bc_sndlink;
493 l->bc_rcvlink = bc_rcvlink;
496 l->state = LINK_RESETTING;
497 __skb_queue_head_init(&l->transmq);
498 __skb_queue_head_init(&l->backlogq);
499 __skb_queue_head_init(&l->deferdq);
500 __skb_queue_head_init(&l->failover_deferdq);
501 skb_queue_head_init(&l->wakeupq);
502 skb_queue_head_init(l->inputq);
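/* Hypothetical usage sketch (added for illustration; the argument names are
 * assumptions standing in for values a caller such as the node layer would
 * supply, matching the parameter list documented above):
 *
 *	struct tipc_link *l;
 *
 *	if (!tipc_link_create(net, if_name, bearer_id, tolerance, net_plane,
 *			      mtu, priority, window, session, self, peer,
 *			      peer_id, peer_caps, bc_sndlink, bc_rcvlink,
 *			      inputq, namedq, &l))
 *		return -ENOMEM;
 *
 * On success *link points at the newly allocated, still reset link endpoint.
 */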
507 * tipc_link_bc_create - create new link to be used for broadcast
508 * @n: pointer to associated node
509 * @mtu: mtu to be used initially if no peers
510 * @window: send window to be used
511 * @inputq: queue to put messages ready for delivery
512 * @namedq: queue to put binding table update messages ready for delivery
513 * @link: return value, pointer to put the created link
515 * Returns true if link was created, otherwise false
517 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
518 int mtu, int window, u16 peer_caps,
519 struct sk_buff_head *inputq,
520 struct sk_buff_head *namedq,
521 struct tipc_link *bc_sndlink,
522 struct tipc_link **link)
526 if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
527 0, ownnode, peer, NULL, peer_caps, bc_sndlink,
528 NULL, inputq, namedq, link))
532 strcpy(l->name, tipc_bclink_name);
533 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
535 l->state = LINK_RESET;
539 /* Broadcast send link is always up */
540 if (link_is_bc_sndlink(l))
541 l->state = LINK_ESTABLISHED;
543 /* Disable replicast if even a single peer doesn't support it */
544 if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
545 tipc_bcast_disable_rcast(net);
551 * tipc_link_fsm_evt - link finite state machine
552 * @l: pointer to link
553 * @evt: state machine event to be processed
555 int tipc_link_fsm_evt(struct tipc_link *l, int evt)
558 int old_state = l->state;
563 case LINK_PEER_RESET_EVT:
564 l->state = LINK_PEER_RESET;
567 l->state = LINK_RESET;
569 case LINK_FAILURE_EVT:
570 case LINK_FAILOVER_BEGIN_EVT:
571 case LINK_ESTABLISH_EVT:
572 case LINK_FAILOVER_END_EVT:
573 case LINK_SYNCH_BEGIN_EVT:
574 case LINK_SYNCH_END_EVT:
581 case LINK_PEER_RESET_EVT:
582 l->state = LINK_ESTABLISHING;
584 case LINK_FAILOVER_BEGIN_EVT:
585 l->state = LINK_FAILINGOVER;
586 case LINK_FAILURE_EVT:
588 case LINK_ESTABLISH_EVT:
589 case LINK_FAILOVER_END_EVT:
591 case LINK_SYNCH_BEGIN_EVT:
592 case LINK_SYNCH_END_EVT:
597 case LINK_PEER_RESET:
600 l->state = LINK_ESTABLISHING;
602 case LINK_PEER_RESET_EVT:
603 case LINK_ESTABLISH_EVT:
604 case LINK_FAILURE_EVT:
606 case LINK_SYNCH_BEGIN_EVT:
607 case LINK_SYNCH_END_EVT:
608 case LINK_FAILOVER_BEGIN_EVT:
609 case LINK_FAILOVER_END_EVT:
614 case LINK_FAILINGOVER:
616 case LINK_FAILOVER_END_EVT:
617 l->state = LINK_RESET;
619 case LINK_PEER_RESET_EVT:
621 case LINK_ESTABLISH_EVT:
622 case LINK_FAILURE_EVT:
624 case LINK_FAILOVER_BEGIN_EVT:
625 case LINK_SYNCH_BEGIN_EVT:
626 case LINK_SYNCH_END_EVT:
631 case LINK_ESTABLISHING:
633 case LINK_ESTABLISH_EVT:
634 l->state = LINK_ESTABLISHED;
636 case LINK_FAILOVER_BEGIN_EVT:
637 l->state = LINK_FAILINGOVER;
640 l->state = LINK_RESET;
642 case LINK_FAILURE_EVT:
643 case LINK_PEER_RESET_EVT:
644 case LINK_SYNCH_BEGIN_EVT:
645 case LINK_FAILOVER_END_EVT:
647 case LINK_SYNCH_END_EVT:
652 case LINK_ESTABLISHED:
654 case LINK_PEER_RESET_EVT:
655 l->state = LINK_PEER_RESET;
656 rc |= TIPC_LINK_DOWN_EVT;
658 case LINK_FAILURE_EVT:
659 l->state = LINK_RESETTING;
660 rc |= TIPC_LINK_DOWN_EVT;
663 l->state = LINK_RESET;
665 case LINK_ESTABLISH_EVT:
666 case LINK_SYNCH_END_EVT:
668 case LINK_SYNCH_BEGIN_EVT:
669 l->state = LINK_SYNCHING;
671 case LINK_FAILOVER_BEGIN_EVT:
672 case LINK_FAILOVER_END_EVT:
679 case LINK_PEER_RESET_EVT:
680 l->state = LINK_PEER_RESET;
681 rc |= TIPC_LINK_DOWN_EVT;
683 case LINK_FAILURE_EVT:
684 l->state = LINK_RESETTING;
685 rc |= TIPC_LINK_DOWN_EVT;
688 l->state = LINK_RESET;
690 case LINK_ESTABLISH_EVT:
691 case LINK_SYNCH_BEGIN_EVT:
693 case LINK_SYNCH_END_EVT:
694 l->state = LINK_ESTABLISHED;
696 case LINK_FAILOVER_BEGIN_EVT:
697 case LINK_FAILOVER_END_EVT:
703 pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
705 trace_tipc_link_fsm(l->name, old_state, l->state, evt);
708 pr_err("Illegal FSM event %x in state %x on link %s\n",
709 evt, l->state, l->name);
710 trace_tipc_link_fsm(l->name, old_state, l->state, evt);
714 /* link_profile_stats - update statistical profiling of traffic
716 static void link_profile_stats(struct tipc_link *l)
719 struct tipc_msg *msg;
722 /* Update counters used in statistical profiling of send traffic */
723 l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
724 l->stats.queue_sz_counts++;
726 skb = skb_peek(&l->transmq);
730 length = msg_size(msg);
732 if (msg_user(msg) == MSG_FRAGMENTER) {
733 if (msg_type(msg) != FIRST_FRAGMENT)
735 length = msg_size(msg_inner_hdr(msg));
737 l->stats.msg_lengths_total += length;
738 l->stats.msg_length_counts++;
740 l->stats.msg_length_profile[0]++;
741 else if (length <= 256)
742 l->stats.msg_length_profile[1]++;
743 else if (length <= 1024)
744 l->stats.msg_length_profile[2]++;
745 else if (length <= 4096)
746 l->stats.msg_length_profile[3]++;
747 else if (length <= 16384)
748 l->stats.msg_length_profile[4]++;
749 else if (length <= 32768)
750 l->stats.msg_length_profile[5]++;
752 l->stats.msg_length_profile[6]++;
756 * tipc_link_too_silent - check if link is "too silent"
757 * @l: tipc link to be checked
759 * Returns true if the link 'silent_intv_cnt' is about to reach the
760 * 'abort_limit' value, otherwise false
762 bool tipc_link_too_silent(struct tipc_link *l)
764 return (l->silent_intv_cnt + 2 > l->abort_limit);
767 /* tipc_link_timeout - perform periodic task as instructed from node timeout
769 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
776 u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
777 u16 bc_acked = l->bc_rcvlink->acked;
778 struct tipc_mon_state *mstate = &l->mon_state;
780 trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
781 trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
783 case LINK_ESTABLISHED:
786 link_profile_stats(l);
787 tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
788 if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
789 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
790 state = bc_acked != bc_snt;
791 state |= l->bc_rcvlink->rcv_unacked;
792 state |= l->rcv_unacked;
793 state |= !skb_queue_empty(&l->transmq);
794 state |= !skb_queue_empty(&l->deferdq);
795 probe = mstate->probing;
796 probe |= l->silent_intv_cnt;
797 if (probe || mstate->monitoring)
798 l->silent_intv_cnt++;
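/* Note added for illustration (not in the original source): the two lines
 * below belong to the LINK_RESET case of this switch. They make a reset
 * endpoint send setup (RESET) messages on each of its first few timer
 * expirations and thereafter only on every 16th one.
 */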
801 setup = l->rst_cnt++ <= 4;
802 setup |= !(l->rst_cnt % 16);
805 case LINK_ESTABLISHING:
809 case LINK_PEER_RESET:
811 case LINK_FAILINGOVER:
817 if (state || probe || setup)
818 tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
824 * link_schedule_user - schedule a message sender for wakeup after congestion
826 * @hdr: header of message that is being sent
827 * Create pseudo msg to send back to user when congestion abates
829 static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
831 u32 dnode = tipc_own_addr(l->net);
832 u32 dport = msg_origport(hdr);
835 /* Create and schedule wakeup pseudo message */
836 skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
837 dnode, l->addr, dport, 0, 0);
840 msg_set_dest_droppable(buf_msg(skb), true);
841 TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
842 skb_queue_tail(&l->wakeupq, skb);
843 l->stats.link_congs++;
844 trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
849 * link_prepare_wakeup - prepare users for wakeup after congestion
851 * Wake up a number of waiting users, as permitted by available space in the send queue
854 static void link_prepare_wakeup(struct tipc_link *l)
856 struct sk_buff *skb, *tmp;
859 skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
860 imp = TIPC_SKB_CB(skb)->chain_imp;
861 if (l->backlog[imp].len < l->backlog[imp].limit) {
862 skb_unlink(skb, &l->wakeupq);
863 skb_queue_tail(l->inputq, skb);
864 } else if (i++ > 10) {
870 void tipc_link_reset(struct tipc_link *l)
872 struct sk_buff_head list;
874 __skb_queue_head_init(&list);
876 l->in_session = false;
877 /* Force re-synch of peer session number before establishing */
880 l->mtu = l->advertised_mtu;
882 spin_lock_bh(&l->wakeupq.lock);
883 skb_queue_splice_init(&l->wakeupq, &list);
884 spin_unlock_bh(&l->wakeupq.lock);
886 spin_lock_bh(&l->inputq->lock);
887 skb_queue_splice_init(&list, l->inputq);
888 spin_unlock_bh(&l->inputq->lock);
890 __skb_queue_purge(&l->transmq);
891 __skb_queue_purge(&l->deferdq);
892 __skb_queue_purge(&l->backlogq);
893 __skb_queue_purge(&l->failover_deferdq);
894 l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
895 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
896 l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
897 l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
898 l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
899 kfree_skb(l->reasm_buf);
900 kfree_skb(l->failover_reasm_skb);
902 l->failover_reasm_skb = NULL;
906 l->snd_nxt_state = 1;
907 l->rcv_nxt_state = 1;
909 l->silent_intv_cnt = 0;
911 l->bc_peer_is_up = false;
912 memset(&l->mon_state, 0, sizeof(l->mon_state));
913 tipc_link_reset_stats(l);
917 * tipc_link_xmit(): enqueue buffer list according to queue situation
919 * @list: chain of buffers containing message
920 * @xmitq: returned list of packets to be sent by caller
922 * Consumes the buffer chain.
923 * Returns 0 on success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
924 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
926 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
927 struct sk_buff_head *xmitq)
929 struct tipc_msg *hdr = buf_msg(skb_peek(list));
930 unsigned int maxwin = l->window;
931 int imp = msg_importance(hdr);
932 unsigned int mtu = l->mtu;
933 u16 ack = l->rcv_nxt - 1;
934 u16 seqno = l->snd_nxt;
935 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
936 struct sk_buff_head *transmq = &l->transmq;
937 struct sk_buff_head *backlogq = &l->backlogq;
938 struct sk_buff *skb, *_skb, *bskb;
939 int pkt_cnt = skb_queue_len(list);
942 if (unlikely(msg_size(hdr) > mtu)) {
943 skb_queue_purge(list);
947 /* Allow oversubscription of one data msg per source at congestion */
948 if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
949 if (imp == TIPC_SYSTEM_IMPORTANCE) {
950 pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
953 rc = link_schedule_user(l, hdr);
957 l->stats.sent_fragmented++;
958 l->stats.sent_fragments += pkt_cnt;
961 /* Prepare each packet for sending, and add to relevant queue: */
962 while (skb_queue_len(list)) {
963 skb = skb_peek(list);
965 msg_set_seqno(hdr, seqno);
966 msg_set_ack(hdr, ack);
967 msg_set_bcast_ack(hdr, bc_ack);
969 if (likely(skb_queue_len(transmq) < maxwin)) {
970 _skb = skb_clone(skb, GFP_ATOMIC);
972 skb_queue_purge(list);
976 __skb_queue_tail(transmq, skb);
977 /* next retransmit attempt */
978 if (link_is_bc_sndlink(l))
979 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
980 __skb_queue_tail(xmitq, _skb);
981 TIPC_SKB_CB(skb)->ackers = l->ackers;
983 l->stats.sent_pkts++;
987 if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
988 kfree_skb(__skb_dequeue(list));
989 l->stats.sent_bundled++;
992 if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
993 kfree_skb(__skb_dequeue(list));
994 __skb_queue_tail(backlogq, bskb);
995 l->backlog[msg_importance(buf_msg(bskb))].len++;
996 l->stats.sent_bundled++;
997 l->stats.sent_bundles++;
1000 l->backlog[imp].len += skb_queue_len(list);
1001 skb_queue_splice_tail_init(list, backlogq);
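/* Hypothetical usage sketch (added for illustration; the helper names are
 * assumptions about a typical caller, not taken from this file): the sender
 * fragments its message into 'list', lets tipc_link_xmit() consume it, and
 * then hands whatever was placed in 'xmitq' to the bearer layer:
 *
 *	struct sk_buff_head list, xmitq;
 *	int rc;
 *
 *	__skb_queue_head_init(&list);
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_msg_build(hdr, m, 0, dlen, tipc_link_mtu(l), &list);
 *	if (!rc)
 *		rc = tipc_link_xmit(l, &list, &xmitq);
 *	tipc_bearer_xmit(net, l->bearer_id, &xmitq, &l->media_addr);
 */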
1007 static void tipc_link_advance_backlog(struct tipc_link *l,
1008 struct sk_buff_head *xmitq)
1010 struct sk_buff *skb, *_skb;
1011 struct tipc_msg *hdr;
1012 u16 seqno = l->snd_nxt;
1013 u16 ack = l->rcv_nxt - 1;
1014 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1016 while (skb_queue_len(&l->transmq) < l->window) {
1017 skb = skb_peek(&l->backlogq);
1020 _skb = skb_clone(skb, GFP_ATOMIC);
1023 __skb_dequeue(&l->backlogq);
1025 l->backlog[msg_importance(hdr)].len--;
1026 __skb_queue_tail(&l->transmq, skb);
1027 /* next retransmit attempt */
1028 if (link_is_bc_sndlink(l))
1029 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1031 __skb_queue_tail(xmitq, _skb);
1032 TIPC_SKB_CB(skb)->ackers = l->ackers;
1033 msg_set_seqno(hdr, seqno);
1034 msg_set_ack(hdr, ack);
1035 msg_set_bcast_ack(hdr, bc_ack);
1037 l->stats.sent_pkts++;
1044 * link_retransmit_failure() - Detect repeated retransmit failures
1045 * @l: tipc link sender
1046 * @r: tipc link receiver (= l in case of unicast)
1047 * @from: seqno of the 1st packet in retransmit request
1048 * @rc: returned code
1050 * Return: true if a repeated retransmit failure has happened, otherwise false
1053 static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
1056 struct sk_buff *skb = skb_peek(&l->transmq);
1057 struct tipc_msg *hdr;
1063 /* Detect repeated retransmit failures on same packet */
1064 if (r->prev_from != from) {
1065 r->prev_from = from;
1066 r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
1067 } else if (time_after(jiffies, r->stale_limit)) {
1068 pr_warn("Retransmission failure on link <%s>\n", l->name);
1069 link_print(l, "State of link ");
1070 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1071 msg_user(hdr), msg_type(hdr), msg_size(hdr),
1073 pr_info("sqno %u, prev: %x, src: %x\n",
1074 msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
1076 trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1077 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1078 trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1080 if (link_is_bc_sndlink(l))
1081 *rc = TIPC_LINK_DOWN_EVT;
1083 *rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1090 /* tipc_link_bc_retrans() - retransmit zero or more packets
1091 * @l: the link to transmit on
1092 * @r: the receiving link ordering the retransmit. Same as l if unicast
1093 * @from: retransmit from (inclusive) this sequence number
1094 * @to: retransmit to (inclusive) this sequence number
1095 * @xmitq: queue for accumulating the retransmitted packets
1097 static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1098 u16 from, u16 to, struct sk_buff_head *xmitq)
1100 struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
1101 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1102 u16 ack = l->rcv_nxt - 1;
1103 struct tipc_msg *hdr;
1111 trace_tipc_link_retrans(r, from, to, &l->transmq);
1113 if (link_retransmit_failure(l, r, from, &rc))
1116 skb_queue_walk(&l->transmq, skb) {
1118 if (less(msg_seqno(hdr), from))
1120 if (more(msg_seqno(hdr), to))
1122 if (link_is_bc_sndlink(l)) {
1123 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1125 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1127 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
1130 hdr = buf_msg(_skb);
1131 msg_set_ack(hdr, ack);
1132 msg_set_bcast_ack(hdr, bc_ack);
1133 _skb->priority = TC_PRIO_CONTROL;
1134 __skb_queue_tail(xmitq, _skb);
1135 l->stats.retransmitted++;
1140 /* tipc_data_input - deliver data and name distr msgs to upper layer
1142 * Consumes buffer if message is of right type
1143 * Node lock must be held
1145 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1146 struct sk_buff_head *inputq)
1148 struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
1149 struct tipc_msg *hdr = buf_msg(skb);
1151 switch (msg_user(hdr)) {
1152 case TIPC_LOW_IMPORTANCE:
1153 case TIPC_MEDIUM_IMPORTANCE:
1154 case TIPC_HIGH_IMPORTANCE:
1155 case TIPC_CRITICAL_IMPORTANCE:
1156 if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
1157 skb_queue_tail(mc_inputq, skb);
1162 skb_queue_tail(inputq, skb);
1164 case GROUP_PROTOCOL:
1165 skb_queue_tail(mc_inputq, skb);
1167 case NAME_DISTRIBUTOR:
1168 l->bc_rcvlink->state = LINK_ESTABLISHED;
1169 skb_queue_tail(l->namedq, skb);
1172 case TUNNEL_PROTOCOL:
1173 case MSG_FRAGMENTER:
1174 case BCAST_PROTOCOL:
1177 pr_warn("Dropping received illegal msg type\n");
1183 /* tipc_link_input - process packet that has passed link protocol check
1187 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1188 struct sk_buff_head *inputq,
1189 struct sk_buff **reasm_skb)
1191 struct tipc_msg *hdr = buf_msg(skb);
1192 struct sk_buff *iskb;
1193 struct sk_buff_head tmpq;
1194 int usr = msg_user(hdr);
1197 if (usr == MSG_BUNDLER) {
1198 skb_queue_head_init(&tmpq);
1199 l->stats.recv_bundles++;
1200 l->stats.recv_bundled += msg_msgcnt(hdr);
1201 while (tipc_msg_extract(skb, &iskb, &pos))
1202 tipc_data_input(l, iskb, &tmpq);
1203 tipc_skb_queue_splice_tail(&tmpq, inputq);
1205 } else if (usr == MSG_FRAGMENTER) {
1206 l->stats.recv_fragments++;
1207 if (tipc_buf_append(reasm_skb, &skb)) {
1208 l->stats.recv_fragmented++;
1209 tipc_data_input(l, skb, inputq);
1210 } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1211 pr_warn_ratelimited("Unable to build fragment list\n");
1212 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1215 } else if (usr == BCAST_PROTOCOL) {
1216 tipc_bcast_lock(l->net);
1217 tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1218 tipc_bcast_unlock(l->net);
1225 /* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
1226 * inner message along with the ones in the old link's deferdq
1229 * @skb: TUNNEL_PROTOCOL message
1230 * @inputq: queue to put messages ready for delivery
1232 static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
1233 struct sk_buff_head *inputq)
1235 struct sk_buff **reasm_skb = &l->failover_reasm_skb;
1236 struct sk_buff_head *fdefq = &l->failover_deferdq;
1237 struct tipc_msg *hdr = buf_msg(skb);
1238 struct sk_buff *iskb;
1244 if (msg_type(hdr) == SYNCH_MSG)
1248 if (!tipc_msg_extract(skb, &iskb, &ipos)) {
1249 pr_warn_ratelimited("Cannot extract FAILOVER_MSG, defq: %d\n",
1250 skb_queue_len(fdefq));
1255 seqno = buf_seqno(iskb);
1257 if (unlikely(less(seqno, l->drop_point))) {
1262 if (unlikely(seqno != l->drop_point)) {
1263 __tipc_skb_queue_sorted(fdefq, seqno, iskb);
1269 if (!tipc_data_input(l, iskb, inputq))
1270 rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
1273 } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
1280 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1282 bool released = false;
1283 struct sk_buff *skb, *tmp;
1285 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1286 if (more(buf_seqno(skb), acked))
1288 __skb_unlink(skb, &l->transmq);
1295 /* tipc_build_gap_ack_blks - build Gap ACK blocks
1296 * @l: tipc link on which data may have arrived with gaps in sequence
1297 * @data: data buffer to store the Gap ACK blocks after built
1299 * returns the actual allocated memory size
1301 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data)
1303 struct sk_buff *skb = skb_peek(&l->deferdq);
1304 struct tipc_gap_ack_blks *ga = data;
1305 u16 len, expect, seqno = 0;
1311 expect = buf_seqno(skb);
1312 skb_queue_walk(&l->deferdq, skb) {
1313 seqno = buf_seqno(skb);
1314 if (unlikely(more(seqno, expect))) {
1315 ga->gacks[n].ack = htons(expect - 1);
1316 ga->gacks[n].gap = htons(seqno - expect);
1317 if (++n >= MAX_GAP_ACK_BLKS) {
1318 pr_info_ratelimited("Too few Gap ACK blocks!\n");
1321 } else if (unlikely(less(seqno, expect))) {
1322 pr_warn("Unexpected skb in deferdq!\n");
1329 ga->gacks[n].ack = htons(seqno);
1330 ga->gacks[n].gap = 0;
1334 len = tipc_gap_ack_blks_sz(n);
1335 ga->len = htons(len);
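/* Worked example (added for illustration, not in the original source):
 * assume rcv_nxt is 103 and the deferdq holds packets 105, 106 and 110.
 * The walk above then produces two Gap ACK blocks:
 *
 *	gacks[0] = { .ack = 106, .gap = 3 }	105-106 held, 107-109 missing
 *	gacks[1] = { .ack = 110, .gap = 0 }	last contiguous run in deferdq
 *
 * Together with the regular ack (102) and seq_gap carried in the STATE
 * message header, this lets the sender release everything the peer already
 * holds and retransmit only 103-104 and 107-109.
 */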
1340 /* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
1341 * acked packets, also doing retransmissions if gaps are found
1343 * @l: tipc link with transmq queue to be advanced
1344 * @acked: seqno of last packet acked by peer without any gaps before
1345 * @gap: # of gap packets
1346 * @ga: buffer pointer to Gap ACK blocks from peer
1347 * @xmitq: queue for accumulating the retransmitted packets if any
1349 * In case of repeated retransmit failures, the call will return shortly
1350 * with a returned code (e.g. TIPC_LINK_DOWN_EVT)
1352 static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
1353 struct tipc_gap_ack_blks *ga,
1354 struct sk_buff_head *xmitq)
1356 struct sk_buff *skb, *_skb, *tmp;
1357 struct tipc_msg *hdr;
1358 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1359 u16 ack = l->rcv_nxt - 1;
1363 if (gap && link_retransmit_failure(l, l, acked + 1, &rc))
1366 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1367 seqno = buf_seqno(skb);
1370 if (less_eq(seqno, acked)) {
1372 __skb_unlink(skb, &l->transmq);
1374 } else if (less_eq(seqno, acked + gap)) {
1375 /* retransmit skb */
1376 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1378 TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
1380 _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1383 hdr = buf_msg(_skb);
1384 msg_set_ack(hdr, ack);
1385 msg_set_bcast_ack(hdr, bc_ack);
1386 _skb->priority = TC_PRIO_CONTROL;
1387 __skb_queue_tail(xmitq, _skb);
1388 l->stats.retransmitted++;
1390 /* retry with Gap ACK blocks if any */
1391 if (!ga || n >= ga->gack_cnt)
1393 acked = ntohs(ga->gacks[n].ack);
1394 gap = ntohs(ga->gacks[n].gap);
1403 /* tipc_link_build_state_msg: prepare link state message for transmission
1405 * Note that sending of broadcast ack is coordinated among nodes, to reduce
1406 * risk of ack storms towards the sender
1408 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1413 /* Broadcast ACK must be sent via a unicast link => defer to caller */
1414 if (link_is_bc_rcvlink(l)) {
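/* Note added for illustration (not in the original source): the test below
 * lets a node send the broadcast ack only when the low nibble of its own
 * address is the bit complement of the low nibble of rcv_nxt, so for any
 * given packet number roughly one node in sixteen acks it directly.
 */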
1415 if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1419 /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1420 l->snd_nxt = l->rcv_nxt;
1421 return TIPC_LINK_SND_STATE;
1426 l->stats.sent_acks++;
1427 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1431 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1433 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1435 int mtyp = RESET_MSG;
1436 struct sk_buff *skb;
1438 if (l->state == LINK_ESTABLISHING)
1439 mtyp = ACTIVATE_MSG;
1441 tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1443 /* Inform peer that this endpoint is going down if applicable */
1444 skb = skb_peek_tail(xmitq);
1445 if (skb && (l->state == LINK_RESET))
1446 msg_set_peer_stopping(buf_msg(skb), 1);
1449 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1450 * Note that sending of broadcast NACK is coordinated among nodes, to
1451 * reduce the risk of NACK storms towards the sender
1453 static int tipc_link_build_nack_msg(struct tipc_link *l,
1454 struct sk_buff_head *xmitq)
1456 u32 def_cnt = ++l->stats.deferred_recv;
1457 u32 defq_len = skb_queue_len(&l->deferdq);
1460 if (link_is_bc_rcvlink(l)) {
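/* Note added for illustration (not in the original source): for a broadcast
 * receiver the comparison below triggers a NACK only when the low nibble of
 * the deferred-reception counter equals the low nibble of the node's own
 * address, i.e. roughly once per 16 deferrals and at a different point for
 * each node, which keeps all receivers from NACKing the same loss at once.
 */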
1461 match1 = def_cnt & 0xf;
1462 match2 = tipc_own_addr(l->net) & 0xf;
1463 if (match1 == match2)
1464 return TIPC_LINK_SND_STATE;
1468 if (defq_len >= 3 && !((defq_len - 3) % 16))
1469 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1473 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1474 * @l: the link that should handle the message
1476 * @xmitq: queue to place packets to be sent after this call
1478 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1479 struct sk_buff_head *xmitq)
1481 struct sk_buff_head *defq = &l->deferdq;
1482 struct tipc_msg *hdr = buf_msg(skb);
1483 u16 seqno, rcv_nxt, win_lim;
1486 /* Verify and update link state */
1487 if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1488 return tipc_link_proto_rcv(l, skb, xmitq);
1490 /* Don't send probe at next timeout expiration */
1491 l->silent_intv_cnt = 0;
1495 seqno = msg_seqno(hdr);
1496 rcv_nxt = l->rcv_nxt;
1497 win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1499 if (unlikely(!link_is_up(l))) {
1500 if (l->state == LINK_ESTABLISHING)
1501 rc = TIPC_LINK_UP_EVT;
1505 /* Drop if outside receive window */
1506 if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1507 l->stats.duplicates++;
1511 /* Forward queues and wake up waiting users */
1512 if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1513 tipc_link_advance_backlog(l, xmitq);
1514 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1515 link_prepare_wakeup(l);
1518 /* Defer delivery if sequence gap */
1519 if (unlikely(seqno != rcv_nxt)) {
1520 __tipc_skb_queue_sorted(defq, seqno, skb);
1521 rc |= tipc_link_build_nack_msg(l, xmitq);
1525 /* Deliver packet */
1527 l->stats.recv_pkts++;
1529 if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1530 rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1531 else if (!tipc_data_input(l, skb, l->inputq))
1532 rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
1533 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1534 rc |= tipc_link_build_state_msg(l, xmitq);
1535 if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1537 } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
1545 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1546 bool probe_reply, u16 rcvgap,
1547 int tolerance, int priority,
1548 struct sk_buff_head *xmitq)
1550 struct tipc_link *bcl = l->bc_rcvlink;
1551 struct sk_buff *skb;
1552 struct tipc_msg *hdr;
1553 struct sk_buff_head *dfq = &l->deferdq;
1554 bool node_up = link_is_up(bcl);
1555 struct tipc_mon_state *mstate = &l->mon_state;
1560 /* Don't send protocol message during reset or link failover */
1561 if (tipc_link_is_blocked(l))
1564 if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1567 if (!skb_queue_empty(dfq))
1568 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1570 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1571 tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1572 l->addr, tipc_own_addr(l->net), 0, 0, 0);
1577 data = msg_data(hdr);
1578 msg_set_session(hdr, l->session);
1579 msg_set_bearer_id(hdr, l->bearer_id);
1580 msg_set_net_plane(hdr, l->net_plane);
1581 msg_set_next_sent(hdr, l->snd_nxt);
1582 msg_set_ack(hdr, l->rcv_nxt - 1);
1583 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1584 msg_set_bc_ack_invalid(hdr, !node_up);
1585 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1586 msg_set_link_tolerance(hdr, tolerance);
1587 msg_set_linkprio(hdr, priority);
1588 msg_set_redundant_link(hdr, node_up);
1589 msg_set_seq_gap(hdr, 0);
1590 msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1592 if (mtyp == STATE_MSG) {
1593 if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1594 msg_set_seqno(hdr, l->snd_nxt_state++);
1595 msg_set_seq_gap(hdr, rcvgap);
1596 msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1597 msg_set_probe(hdr, probe);
1598 msg_set_is_keepalive(hdr, probe || probe_reply);
1599 if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1600 glen = tipc_build_gap_ack_blks(l, data);
1601 tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1602 msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1603 skb_trim(skb, INT_H_SIZE + glen + dlen);
1604 l->stats.sent_states++;
1607 /* RESET_MSG or ACTIVATE_MSG */
1608 if (mtyp == ACTIVATE_MSG) {
1609 msg_set_dest_session_valid(hdr, 1);
1610 msg_set_dest_session(hdr, l->peer_session);
1612 msg_set_max_pkt(hdr, l->advertised_mtu);
1613 strcpy(data, l->if_name);
1614 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1615 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1618 l->stats.sent_probes++;
1620 l->stats.sent_nacks++;
1621 skb->priority = TC_PRIO_CONTROL;
1622 __skb_queue_tail(xmitq, skb);
1623 trace_tipc_proto_build(skb, false, l->name);
1626 void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1627 struct sk_buff_head *xmitq)
1629 u32 onode = tipc_own_addr(l->net);
1630 struct tipc_msg *hdr, *ihdr;
1631 struct sk_buff_head tnlq;
1632 struct sk_buff *skb;
1633 u32 dnode = l->addr;
1635 skb_queue_head_init(&tnlq);
1636 skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1637 INT_H_SIZE, BASIC_H_SIZE,
1638 dnode, onode, 0, 0, 0);
1640 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1645 msg_set_msgcnt(hdr, 1);
1646 msg_set_bearer_id(hdr, l->peer_bearer_id);
1648 ihdr = (struct tipc_msg *)msg_data(hdr);
1649 tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1650 BASIC_H_SIZE, dnode);
1651 msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1652 __skb_queue_tail(&tnlq, skb);
1653 tipc_link_xmit(l, &tnlq, xmitq);
1656 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1657 * with contents of the link's transmit and backlog queues.
1659 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1660 int mtyp, struct sk_buff_head *xmitq)
1662 struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1663 struct sk_buff *skb, *tnlskb;
1664 struct tipc_msg *hdr, tnlhdr;
1665 struct sk_buff_head *queue = &l->transmq;
1666 struct sk_buff_head tmpxq, tnlq;
1667 u16 pktlen, pktcnt, seqno = l->snd_nxt;
1672 skb_queue_head_init(&tnlq);
1673 skb_queue_head_init(&tmpxq);
1675 /* At least one packet required for safe algorithm => add dummy */
1676 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1677 BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1678 0, 0, TIPC_ERR_NO_PORT);
1680 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1683 skb_queue_tail(&tnlq, skb);
1684 tipc_link_xmit(l, &tnlq, &tmpxq);
1685 __skb_queue_purge(&tmpxq);
1687 /* Initialize reusable tunnel packet header */
1688 tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1689 mtyp, INT_H_SIZE, l->addr);
1690 if (mtyp == SYNCH_MSG)
1691 pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
1693 pktcnt = skb_queue_len(&l->transmq);
1694 pktcnt += skb_queue_len(&l->backlogq);
1695 msg_set_msgcnt(&tnlhdr, pktcnt);
1696 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1698 /* Wrap each packet into a tunnel packet */
1699 skb_queue_walk(queue, skb) {
1701 if (queue == &l->backlogq)
1702 msg_set_seqno(hdr, seqno++);
1703 pktlen = msg_size(hdr);
1704 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1705 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
1707 pr_warn("%sunable to send packet\n", link_co_err);
1710 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1711 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1712 __skb_queue_tail(&tnlq, tnlskb);
1714 if (queue != &l->backlogq) {
1715 queue = &l->backlogq;
1719 tipc_link_xmit(tnl, &tnlq, xmitq);
1721 if (mtyp == FAILOVER_MSG) {
1722 tnl->drop_point = l->rcv_nxt;
1723 tnl->failover_reasm_skb = l->reasm_buf;
1724 l->reasm_buf = NULL;
1726 /* Failover the link's deferdq */
1727 if (unlikely(!skb_queue_empty(fdefq))) {
1728 pr_warn("Link failover deferdq not empty: %d!\n",
1729 skb_queue_len(fdefq));
1730 __skb_queue_purge(fdefq);
1732 skb_queue_splice_init(&l->deferdq, fdefq);
1737 * tipc_link_failover_prepare() - prepare tnl for link failover
1739 * This is a special version of its precursor, tipc_link_tnl_prepare();
1740 * see tipc_node_link_failover() for details.
1744 * @xmitq: queue for messages to be xmited
1746 void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
1747 struct sk_buff_head *xmitq)
1749 struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1751 tipc_link_create_dummy_tnl_msg(tnl, xmitq);
1753 /* This failover link endpoint was never established before,
1754 * so it has not received anything from peer.
1755 * Otherwise, it must be a normal failover situation or the
1756 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
1757 * would have to start over from scratch instead.
1759 tnl->drop_point = 1;
1760 tnl->failover_reasm_skb = NULL;
1762 /* Initiate the link's failover deferdq */
1763 if (unlikely(!skb_queue_empty(fdefq))) {
1764 pr_warn("Link failover deferdq not empty: %d!\n",
1765 skb_queue_len(fdefq));
1766 __skb_queue_purge(fdefq);
1770 /* tipc_link_validate_msg(): validate message against current link state
1771 * Returns true if message should be accepted, otherwise false
1773 bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
1775 u16 curr_session = l->peer_session;
1776 u16 session = msg_session(hdr);
1777 int mtyp = msg_type(hdr);
1779 if (msg_user(hdr) != LINK_PROTOCOL)
1786 /* Accept only RESET with new session number */
1787 return more(session, curr_session);
1791 /* Accept only ACTIVATE with new or current session number */
1792 return !less(session, curr_session);
1794 /* Accept only STATE with current session number */
1797 if (session != curr_session)
1799 /* Extra sanity check */
1800 if (!link_is_up(l) && msg_ack(hdr))
1802 if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
1804 /* Accept only STATE with new sequence number */
1805 return !less(msg_seqno(hdr), l->rcv_nxt_state);
1811 /* tipc_link_proto_rcv(): receive link level protocol message :
1812 * Note that network plane id propagates through the network, and may
1813 * change at any time. The node with lowest numerical id determines network plane
1816 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1817 struct sk_buff_head *xmitq)
1819 struct tipc_msg *hdr = buf_msg(skb);
1820 struct tipc_gap_ack_blks *ga = NULL;
1822 u16 ack = msg_ack(hdr);
1823 u16 gap = msg_seq_gap(hdr);
1824 u16 peers_snd_nxt = msg_next_sent(hdr);
1825 u16 peers_tol = msg_link_tolerance(hdr);
1826 u16 peers_prio = msg_linkprio(hdr);
1827 u16 rcv_nxt = l->rcv_nxt;
1828 u16 dlen = msg_data_sz(hdr);
1829 int mtyp = msg_type(hdr);
1830 bool reply = msg_probe(hdr);
1836 trace_tipc_proto_rcv(skb, false, l->name);
1837 if (tipc_link_is_blocked(l) || !xmitq)
1840 if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1841 l->net_plane = msg_net_plane(hdr);
1845 data = msg_data(hdr);
1847 if (!tipc_link_validate_msg(l, hdr)) {
1848 trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
1849 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
1856 /* Complete own link name with peer's interface name */
1857 if_name = strrchr(l->name, ':') + 1;
1858 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1860 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1862 strncpy(if_name, data, TIPC_MAX_IF_NAME);
1864 /* Update own tolerance if peer indicates a non-zero value */
1865 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
1866 l->tolerance = peers_tol;
1867 l->bc_rcvlink->tolerance = peers_tol;
1869 /* Update own priority if peer's priority is higher */
1870 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1871 l->priority = peers_prio;
1873 /* If peer is going down we want full re-establish cycle */
1874 if (msg_peer_stopping(hdr)) {
1875 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1879 /* If this endpoint was re-created while peer was ESTABLISHING
1880 * it doesn't know current session number. Force re-synch.
1882 if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
1883 l->session != msg_dest_session(hdr)) {
1884 if (less(l->session, msg_dest_session(hdr)))
1885 l->session = msg_dest_session(hdr) + 1;
1889 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1890 if (mtyp == RESET_MSG || !link_is_up(l))
1891 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1893 /* ACTIVATE_MSG takes up link if it was already locally reset */
1894 if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
1895 rc = TIPC_LINK_UP_EVT;
1897 l->peer_session = msg_session(hdr);
1898 l->in_session = true;
1899 l->peer_bearer_id = msg_bearer_id(hdr);
1900 if (l->mtu > msg_max_pkt(hdr))
1901 l->mtu = msg_max_pkt(hdr);
1905 l->rcv_nxt_state = msg_seqno(hdr) + 1;
1907 /* Update own tolerance if peer indicates a non-zero value */
1908 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
1909 l->tolerance = peers_tol;
1910 l->bc_rcvlink->tolerance = peers_tol;
1912 /* Update own prio if peer indicates a different value */
1913 if ((peers_prio != l->priority) &&
1914 in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
1915 l->priority = peers_prio;
1916 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1919 l->silent_intv_cnt = 0;
1920 l->stats.recv_states++;
1922 l->stats.recv_probes++;
1924 if (!link_is_up(l)) {
1925 if (l->state == LINK_ESTABLISHING)
1926 rc = TIPC_LINK_UP_EVT;
1930 /* Receive Gap ACK blocks from peer if any */
1931 if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
1932 ga = (struct tipc_gap_ack_blks *)data;
1933 glen = ntohs(ga->len);
1934 /* sanity check: if failed, ignore Gap ACK blocks */
1935 if (glen != tipc_gap_ack_blks_sz(ga->gack_cnt))
1939 tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
1940 &l->mon_state, l->bearer_id);
1942 /* Send NACK if peer has sent pkts we haven't received yet */
1943 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
1944 rcvgap = peers_snd_nxt - l->rcv_nxt;
1945 if (rcvgap || reply)
1946 tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
1947 rcvgap, 0, 0, xmitq);
1949 rc |= tipc_link_advance_transmq(l, ack, gap, ga, xmitq);
1951 /* If NACK, retransmit will now start at right position */
1953 l->stats.recv_nacks++;
1955 tipc_link_advance_backlog(l, xmitq);
1956 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1957 link_prepare_wakeup(l);
1964 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1966 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1968 struct sk_buff_head *xmitq)
1970 struct sk_buff *skb;
1971 struct tipc_msg *hdr;
1972 struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1973 u16 ack = l->rcv_nxt - 1;
1974 u16 gap_to = peers_snd_nxt - 1;
1976 skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
1977 0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
1981 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1982 msg_set_bcast_ack(hdr, ack);
1983 msg_set_bcgap_after(hdr, ack);
1985 gap_to = buf_seqno(dfrd_skb) - 1;
1986 msg_set_bcgap_to(hdr, gap_to);
1987 msg_set_non_seq(hdr, bcast);
1988 __skb_queue_tail(xmitq, skb);
1992 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1994 * Give a newly added peer node the sequence number where it should
1995 * start receiving and acking broadcast packets.
1997 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1998 struct sk_buff_head *xmitq)
2000 struct sk_buff_head list;
2002 __skb_queue_head_init(&list);
2003 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
2005 msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
2006 tipc_link_xmit(l, &list, xmitq);
2009 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
2011 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
2013 int mtyp = msg_type(hdr);
2014 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2019 if (msg_user(hdr) == BCAST_PROTOCOL) {
2020 l->rcv_nxt = peers_snd_nxt;
2021 l->state = LINK_ESTABLISHED;
2025 if (l->peer_caps & TIPC_BCAST_SYNCH)
2028 if (msg_peer_node_is_up(hdr))
2031 /* Compatibility: accept older, less safe initial synch data */
2032 if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2033 l->rcv_nxt = peers_snd_nxt;
2036 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2038 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2039 struct sk_buff_head *xmitq)
2041 struct tipc_link *snd_l = l->bc_sndlink;
2042 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2043 u16 from = msg_bcast_ack(hdr) + 1;
2044 u16 to = from + msg_bc_gap(hdr) - 1;
2050 if (!msg_peer_node_is_up(hdr))
2053 /* Open when peer acknowledges our bcast init msg (pkt #1) */
2055 l->bc_peer_is_up = true;
2057 if (!l->bc_peer_is_up)
2060 l->stats.recv_nacks++;
2062 /* Ignore if peers_snd_nxt goes beyond receive window */
2063 if (more(peers_snd_nxt, l->rcv_nxt + l->window))
2066 rc = tipc_link_bc_retrans(snd_l, l, from, to, xmitq);
2068 l->snd_nxt = peers_snd_nxt;
2069 if (link_bc_rcv_gap(l))
2070 rc |= TIPC_LINK_SND_STATE;
2072 /* Return now if sender supports nack via STATE messages */
2073 if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2076 /* Otherwise, be backwards compatible */
2078 if (!more(peers_snd_nxt, l->rcv_nxt)) {
2079 l->nack_state = BC_NACK_SND_CONDITIONAL;
2083 /* Don't NACK if one was recently sent or peeked */
2084 if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2085 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2089 /* Conditionally delay NACK sending until next synch rcv */
2090 if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2091 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2092 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
2096 /* Send NACK now but suppress next one */
2097 tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2098 l->nack_state = BC_NACK_SND_SUPPRESS;
2102 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
2103 struct sk_buff_head *xmitq)
2105 struct sk_buff *skb, *tmp;
2106 struct tipc_link *snd_l = l->bc_sndlink;
2108 if (!link_is_up(l) || !l->bc_peer_is_up)
2111 if (!more(acked, l->acked))
2114 trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq);
2115 /* Skip over packets peer has already acked */
2116 skb_queue_walk(&snd_l->transmq, skb) {
2117 if (more(buf_seqno(skb), l->acked))
2121 /* Update/release the packets peer is acking now */
2122 skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
2123 if (more(buf_seqno(skb), acked))
2125 if (!--TIPC_SKB_CB(skb)->ackers) {
2126 __skb_unlink(skb, &snd_l->transmq);
2131 tipc_link_advance_backlog(snd_l, xmitq);
2132 if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
2133 link_prepare_wakeup(snd_l);
2136 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
2137 * This function is here for backwards compatibility, since
2138 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
2140 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2141 struct sk_buff_head *xmitq)
2143 struct tipc_msg *hdr = buf_msg(skb);
2144 u32 dnode = msg_destnode(hdr);
2145 int mtyp = msg_type(hdr);
2146 u16 acked = msg_bcast_ack(hdr);
2147 u16 from = acked + 1;
2148 u16 to = msg_bcgap_to(hdr);
2149 u16 peers_snd_nxt = to + 1;
2154 if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2157 if (mtyp != STATE_MSG)
2160 if (dnode == tipc_own_addr(l->net)) {
2161 tipc_link_bc_ack_rcv(l, acked, xmitq);
2162 rc = tipc_link_bc_retrans(l->bc_sndlink, l, from, to, xmitq);
2163 l->stats.recv_nacks++;
2167 /* Msg for other node => suppress own NACK at next sync if applicable */
2168 if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2169 l->nack_state = BC_NACK_SND_SUPPRESS;
2174 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
2176 int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
2179 l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win);
2180 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2);
2181 l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3);
2182 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
2183 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
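/* Worked example (added for illustration, not in the original source): with
 * a window of 50 (the TIPC default) the limits above evaluate to 50, 100,
 * 150 and 200 packets for the four user importance levels. max_bulk is the
 * number of packets needed to carry TIPC_MAX_PUBL name publications, since
 * mtu / ITEM_SIZE publications fit into each packet, so a complete name
 * table bulk distribution is never rejected by the SYSTEM_IMPORTANCE limit.
 */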
2187 * tipc_link_reset_stats - reset link statistics
2188 * @l: pointer to link
2190 void tipc_link_reset_stats(struct tipc_link *l)
2192 memset(&l->stats, 0, sizeof(l->stats));
2195 static void link_print(struct tipc_link *l, const char *str)
2197 struct sk_buff *hskb = skb_peek(&l->transmq);
2198 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
2199 u16 tail = l->snd_nxt - 1;
2201 pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
2202 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2203 skb_queue_len(&l->transmq), head, tail,
2204 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
2207 /* Parse and validate nested (link) properties valid for media, bearer and link
2209 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2213 err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2214 tipc_nl_prop_policy, NULL);
2218 if (props[TIPC_NLA_PROP_PRIO]) {
2221 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2222 if (prio > TIPC_MAX_LINK_PRI)
2226 if (props[TIPC_NLA_PROP_TOL]) {
2229 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2230 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2234 if (props[TIPC_NLA_PROP_WIN]) {
2237 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2238 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
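/* Message layout built below: a TIPC_NLA_LINK nest carrying the link name,
 * destination, MTU, RX/TX packet totals and up/active flags, plus a nested
 * TIPC_NLA_LINK_PROP section (priority, tolerance, window) and a nested
 * TIPC_NLA_LINK_STATS section. On failure the partially built nests are
 * cancelled before returning -EMSGSIZE.
 */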
/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	u32 self = tipc_own_addr(net);
	struct nlattr *attrs;
	struct nlattr *prop;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;
	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
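/* Broadcast-link variant of the stats nest. Unlike the unicast helper above,
 * RX_INFO and TX_INFO carry the real packet totals here, since the broadcast
 * link reports zero in its top-level TIPC_NLA_LINK_RX/TX attributes.
 */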
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}
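/* tipc_nl_add_bc_link - dump the broadcast link into a netlink message.
 * The bcast lock is held while the nests are being filled so that the
 * counters and window value stay consistent; the lock is dropped on every
 * exit path, whether the message is completed or cancelled.
 */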
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 bc_mode = tipc_bcast_get_broadcast_mode(net);
	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);
	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}
	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
		goto prop_msg_full;
	if (bc_mode & BCLINK_MODE_SEL)
		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
				bc_ratio))
			goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
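/* Runtime property setters. Tolerance and priority changes take effect
 * locally and are also advertised to the peer right away by queueing a
 * STATE_MSG protocol message on xmitq.
 */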
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (l->bc_rcvlink)
		l->bc_rcvlink->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}
void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}
void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}
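/* tipc_link_name_ext - render a printable name for a link into buf, which
 * must hold at least TIPC_MAX_LINK_NAME bytes: "null" for a NULL link,
 * synthetic names for the broadcast send/receive pseudo-links, and the
 * configured link name otherwise. Mainly useful for tracing and debug output.
 */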
char *tipc_link_name_ext(struct tipc_link *l, char *buf)
{
	if (!l)
		scnprintf(buf, TIPC_MAX_LINK_NAME, "null");
	else if (link_is_bc_sndlink(l))
		scnprintf(buf, TIPC_MAX_LINK_NAME, "broadcast-sender");
	else if (link_is_bc_rcvlink(l))
		scnprintf(buf, TIPC_MAX_LINK_NAME,
			  "broadcast-receiver, peer %x", l->addr);
	else
		memcpy(buf, l->name, TIPC_MAX_LINK_NAME);

	return buf;
}
/**
 * tipc_link_dump - dump TIPC link data
 * @l: tipc link to be dumped
 * @dqueues: bitmask selecting which link queues to dump:
 *           - TIPC_DUMP_NONE: don't dump any link queue
 *           - TIPC_DUMP_TRANSMQ: dump the link transmq queue
 *           - TIPC_DUMP_BACKLOGQ: dump the link backlog queue
 *           - TIPC_DUMP_DEFERDQ: dump the link deferd queue
 *           - TIPC_DUMP_INPUTQ: dump the link input queue
 *           - TIPC_DUMP_WAKEUP: dump the link wakeup queue
 *           - TIPC_DUMP_ALL: dump all of the link queues above
 * @buf: buffer that receives the formatted dump data
 *
 * Return: the number of characters written to buf
 */
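/* The first output record is a single line of link state variables (peer
 * address, FSM state, session info, send/receive positions and counters),
 * followed by "| len head-seqno tail-seqno" summaries for the transmq,
 * deferdq, backlogq and input queue. Full queue contents are appended only
 * for the queues selected by dqueues.
 */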
int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
{
	int i = 0;
	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
	struct sk_buff_head *list;
	struct sk_buff *hskb, *tskb;
	u32 len;

	if (!l) {
		i += scnprintf(buf, sz, "link data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "link data: %x", l->addr);
	i += scnprintf(buf + i, sz - i, " %x", l->state);
	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
	i += scnprintf(buf + i, sz - i, " %u", l->session);
	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->prev_from);
	i += scnprintf(buf + i, sz - i, " %u", 0);
	i += scnprintf(buf + i, sz - i, " %u", l->acked);

	list = &l->transmq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->deferdq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->backlogq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = l->inputq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	if (dqueues & TIPC_DUMP_TRANSMQ) {
		i += scnprintf(buf + i, sz - i, "transmq: ");
		i += tipc_list_dump(&l->transmq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_BACKLOGQ) {
		i += scnprintf(buf + i, sz - i,
			       "backlogq: <%u %u %u %u %u>, ",
			       l->backlog[TIPC_LOW_IMPORTANCE].len,
			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
		i += tipc_list_dump(&l->backlogq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_DEFERDQ) {
		i += scnprintf(buf + i, sz - i, "deferdq: ");
		i += tipc_list_dump(&l->deferdq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_INPUTQ) {
		i += scnprintf(buf + i, sz - i, "inputq: ");
		i += tipc_list_dump(l->inputq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_WAKEUP) {
		i += scnprintf(buf + i, sz - i, "wakeup: ");
		i += tipc_list_dump(&l->wakeupq, false, buf + i);
	}

	return i;
}