/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "name_table.h"
#include "name_distr.h"

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4       /* ACK at 1/4 of rcv window size */
enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @blocking_link: address of the congested link we are currently sleeping on
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	atomic_t dupl_rcvcnt;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct tipc_group *group;
};
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tsk->phdr, (u32)imp);
	return 0;
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}
/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len.
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ.
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}
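/* Worked example (a sketch, assuming the FLOWCTL_BLK_SZ of 1024 defined in
 * socket.h): for a 64 KB buffer,
 *
 *	tsk_blocks(65536)     = 65536 / 1024 + 1 = 65 blocks consumed
 *	tsk_adv_blocks(65536) = 65536 / 1024 / 4 = 16 blocks advertised
 *
 * The divide-by-four on the advertising side absorbs the worst-case
 * truesize(len)/len inflation noted above, so a socket never advertises
 * more capacity than its receive buffer can really absorb.
 */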
/* tsk_inc(): increment counter for sent or received data
 * - If block-based flow control is not supported by the peer we
 *   fall back to message-based flow control, incrementing the counter
 *   by one per message instead.
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
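/* Example (sketch): with a TIPC_BLOCK_FLOWCTL peer, a 2500 byte message
 * costs 2500 / 1024 + 1 = 3 flow control units (blocks); with a legacy
 * peer the same message costs exactly one unit, which reproduces the old
 * one-ack-per-message scheme.
 */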
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}
static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is a datagram socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}
/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	u32 self = tipc_own_addr(sock_net(sk));
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node, peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && peer_node == self)
		return true;

	if (!peer_node && orig_node == self)
		return true;

	return false;
}
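/* The three accepted origin/peer combinations above, spelled out (this is
 * just a restatement of the checks, not new behavior):
 *
 *	orig_node == peer_node			the regular case
 *	orig_node == 0, peer_node == self	peer sent before this node
 *						learned its own address
 *	peer_node == 0, orig_node == self	connection was set up while
 *						this node was still <0.0.0>
 */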
/* tipc_set_sk_state - set the sk_state of the socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_OPEN:
		res = 0;
		break;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}
static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}
#define tipc_wait_for_cond(sock_, timeo_, condition_) \
({ \
	struct sock *sk_; \
	long rc_; \
 \
	while ((rc_ = !(condition_))) { \
		DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
		sk_ = (sock_)->sk; \
		rc_ = tipc_sk_sock_err((sock_), timeo_); \
		if (rc_) \
			break; \
		prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
		release_sock(sk_); \
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep(); \
		lock_sock(sk_); \
		remove_wait_queue(sk_sleep(sk_), &wait_); \
	} \
	rc_; \
})
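/* Usage sketch (hypothetical caller, mirroring the real call sites below):
 *
 *	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
 *	int rc;
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (rc)
 *		return rc;
 *
 * The macro re-evaluates the condition after every wakeup, drops and
 * re-takes the socket lock around the sleep, checks for socket errors and
 * pending signals via tipc_sk_sock_err(), and counts down the caller's
 * timeout, so call sites only have to supply the condition itself.
 */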
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	tsk->group_is_open = true;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}

	return 0;
}
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}

/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = CONN_TIMEOUT_DEFAULT;
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, TIPC_CONN_SHUTDOWN);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}
/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}
/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}

	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope >= 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}
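/* Userspace view (a hypothetical sketch, not part of this file): binding
 * the single name {type 1000, instance 100} with cluster scope could look
 * like this:
 *
 *	struct sockaddr_tipc a = {0};
 *
 *	a.family = AF_TIPC;
 *	a.addrtype = TIPC_ADDR_NAME;
 *	a.scope = TIPC_CLUSTER_SCOPE;
 *	a.addr.name.name.type = 1000;
 *	a.addr.name.name.instance = 100;
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));
 *
 * Passing a negative scope withdraws the same name again, per the rules
 * described in the comment above.
 */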
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns address size on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}
/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 *
 * Returns pollmask value
 *
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	sock_poll_wait(file, wait);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
	case TIPC_CONNECTING:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		/* fall through */
	case TIPC_LISTEN:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}

	return revents;
}
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen))
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}
/**
 * tipc_send_group_msg - send a message to a member in the group
 * @net: network namespace
 * @m: message to send
 * @mb: group member
 * @dnode: destination node
 * @dport: destination port
 * @dlen: total length of message data
 */
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
			       struct msghdr *m, struct tipc_member *mb,
			       u32 dnode, u32 dport, int dlen)
{
	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
	struct tipc_mc_method *method = &tsk->mc_method;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct sk_buff_head pkts;
	int mtu, rc;

	/* Complete message header */
	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, dport);
	msg_set_destnode(hdr, dnode);
	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(&tsk->cong_links, dnode, 0);
		tsk->cong_link_cnt++;
	}

	/* Update send window */
	tipc_group_update_member(mb, blks);

	/* A broadcast sent within next EXPIRE period must follow same path */
	method->rcast = true;
	method->mandatory = true;

	return dlen;
}
/**
 * tipc_send_group_unicast - send message to a member in the group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct net *net = sock_net(sk);
	struct tipc_member *mb = NULL;
	u32 node, port;
	int rc;

	node = dest->addr.id.node;
	port = dest->addr.id.ref;
	if (!port && !node)
		return -EHOSTUNREACH;

	/* Block or return if destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(&tsk->cong_links, node, 0) &&
				!tipc_group_cong(grp, node, port, blks, &mb));
	if (unlikely(rc))
		return rc;

	if (unlikely(!mb))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);

	return rc ? rc : dlen;
}
/**
 * tipc_send_group_anycast - send message to any member with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct list_head *cong_links = &tsk->cong_links;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_member *first = NULL;
	struct tipc_member *mbr = NULL;
	struct net *net = sock_net(sk);
	u32 node, port, exclude;
	struct list_head dsts;
	u32 type, inst, scope;
	int lookups = 0;
	int dstcnt, rc;
	bool cong;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	while (++lookups < 4) {
		first = NULL;

		/* Look for a non-congested destination member, if any */
		while (1) {
			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
						 &dstcnt, exclude, false))
				return -EHOSTUNREACH;
			tipc_dest_pop(&dsts, &node, &port);
			cong = tipc_group_cong(grp, node, port, blks, &mbr);
			if (!cong)
				break;
			if (mbr == first)
				break;
			if (!first)
				first = mbr;
		}

		/* Start over if destination was not in member list */
		if (unlikely(!mbr))
			continue;

		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
			break;

		/* Block or return if destination link or member is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					!tipc_dest_find(cong_links, node, 0) &&
					!tipc_group_cong(grp, node, port,
							 blks, &mbr));
		if (unlikely(rc))
			return rc;

		/* Send, unless destination disappeared while waiting */
		if (likely(mbr))
			break;
	}

	if (unlikely(lookups >= 4))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

	return rc ? rc : dlen;
}
/**
 * tipc_send_group_bcast - send message to all members in communication group
 * @sk: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_nlist *dsts = tipc_group_dests(grp);
	struct tipc_mc_method *method = &tsk->mc_method;
	bool ack = method->mandatory && method->rcast;
	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	int rc = -EHOSTUNREACH;

	if (!dsts->local && !dsts->remote)
		return -EHOSTUNREACH;

	/* Block or return if any destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
				!tipc_group_bc_cong(grp, blks));
	if (unlikely(rc))
		return rc;

	/* Complete message header */
	if (dest) {
		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
		msg_set_nameinst(hdr, dest->addr.name.name.instance);
	} else {
		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
		msg_set_nameinst(hdr, 0);
	}
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));

	/* Avoid getting stuck with repeated forced replicasts */
	msg_set_grp_bc_ack_req(hdr, ack);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Update broadcast sequence number and send windows */
	tipc_group_update_bc_members(tsk->group, blks, ack);

	/* Broadcast link is now free to choose method for next broadcast */
	method->mandatory = false;
	method->expires = jiffies;

	return dlen;
}
/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	u32 type, inst, scope, exclude;
	struct list_head dsts;
	u32 dstcnt;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
				 &dstcnt, exclude, true))
		return -EHOSTUNREACH;

	if (dstcnt == 1) {
		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
		return tipc_send_group_unicast(sock, m, dlen, timeout);
	}

	tipc_dest_list_purge(&dsts);
	return tipc_send_group_bcast(sock, m, dlen, timeout);
}
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	u32 type, lower, upper, scope;
	struct sk_buff *skb, *_skb;
	u32 portid, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	int user, mtyp, hlen;
	bool exact;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		onode = msg_orignode(hdr);
		type = msg_nametype(hdr);

		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			kfree_skb(skb);
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			lower = 0;
			upper = ~0;
			scope = msg_lookup_scope(hdr);
			exact = true;
		} else {
			/* TIPC_NODE_SCOPE means "any scope" in this context */
			if (onode == self)
				scope = TIPC_NODE_SCOPE;
			else
				scope = TIPC_CLUSTER_SCOPE;
			exact = false;
			lower = msg_namelower(hdr);
			upper = msg_nameupper(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_mc_lookup(net, type, lower, upper,
				       scope, exact, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}
/**
 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr))
		goto exit;

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);
		goto exit;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}
/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 dport, dnode = 0;
	u32 type, inst;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (likely(dest)) {
		if (unlikely(m->msg_namelen < sizeof(*dest)))
			return -EINVAL;
		if (unlikely(dest->family != AF_TIPC))
			return -EINVAL;
	}

	if (grp) {
		if (!dest)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_NAME)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_ID)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_MCAST)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn || dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		dnode = dest->addr.name.domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	} else {
		return -EINVAL;
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, dnode, 0));
	if (unlikely(rc))
		return rc;

	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(clinks, dnode, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}
/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendstream(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && (dlen == rc))
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		return rc;
	}

	do {
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}
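/* Example (sketch, assuming the 66000 byte TIPC_MAX_USER_MSG_SIZE from
 * msg.h): a 200000 byte send on a SOCK_STREAM socket is cut into four
 * chunks (3 x 66000 + 1 x 2000), each built and transmitted separately;
 * if a link congests or connection flow control kicks in part way, the
 * loop above stops and returns the number of bytes already sent.
 */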
/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_sendstream(sock, m, dsz);
}

/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}
/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
	struct tipc_msg *hdr = buf_msg(skb);

	if (!srcaddr)
		return;

	srcaddr->sock.family = AF_TIPC;
	srcaddr->sock.addrtype = TIPC_ADDR_ID;
	srcaddr->sock.scope = 0;
	srcaddr->sock.addr.id.ref = msg_origport(hdr);
	srcaddr->sock.addr.id.node = msg_orignode(hdr);
	srcaddr->sock.addr.name.domain = 0;
	m->msg_namelen = sizeof(struct sockaddr_tipc);

	if (!msg_in_group(hdr))
		return;

	/* Group message users may also want to know sending member's id */
	srcaddr->member.family = AF_TIPC;
	srcaddr->member.addrtype = TIPC_ADDR_NAME;
	srcaddr->member.scope = 0;
	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
	srcaddr->member.addr.name.domain = 0;
	m->msg_namelen = sizeof(*srcaddr);
}
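/* Userspace view (hypothetical sketch): a group receiver can hand in room
 * for two addresses, mirroring struct sockaddr_pair above:
 *
 *	struct sockaddr_tipc src[2];
 *	struct msghdr m = {0};
 *
 *	m.msg_name = src;
 *	m.msg_namelen = sizeof(src);
 *	recvmsg(sd, &m, 0);
 *
 * src[0] then holds the sender's socket id <node:ref>; for group messages
 * src[1] additionally holds the sending member's name, and the returned
 * msg_namelen tells which of the two layouts was filled in.
 */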
/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
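/* Userspace view (hypothetical sketch): the destination name advertised
 * above arrives as a SOL_TIPC/TIPC_DESTNAME control message holding three
 * u32 values:
 *
 *	struct cmsghdr *cm;
 *
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
 *		if (cm->cmsg_level == SOL_TIPC &&
 *		    cm->cmsg_type == TIPC_DESTNAME) {
 *			__u32 *name = (__u32 *)CMSG_DATA(cm);
 *
 *			name[0] holds type, name[1] lower, name[2] upper
 *		}
 *	}
 *
 * TIPC_ERRINFO (error code plus returned-data size) and TIPC_RETDATA (the
 * returned payload itself) are delivered the same way for rejected
 * messages.
 */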
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = sock_error(sk);
		if (err)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	*timeop = timeo;
	return err;
}
/**
 * tipc_recvmsg - receive packet-oriented message
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	bool connected = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	int rc, err, hlen, dlen, copy;
	struct sk_buff_head xmitq;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool grp_evt;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Step rcv queue to first msg with data or error; wait if necessary */
	do {
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		grp_evt = msg_is_grp_evt(hdr);
		if (likely(dlen || err))
			break;
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	tipc_sk_set_orig_addr(m, skb);
	rc = tipc_sk_anc_data_recv(m, hdr, tsk);
	if (unlikely(rc))
		goto exit;

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		copy = min_t(int, dlen, buflen);
		if (unlikely(copy != dlen))
			m->msg_flags |= MSG_TRUNC;
		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
	} else {
		copy = 0;
		rc = 0;
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
			rc = -ECONNRESET;
	}
	if (unlikely(rc))
		goto exit;

	/* Mark message as group event if applicable */
	if (unlikely(grp_evt)) {
		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
			m->msg_flags |= MSG_EOR;
		m->msg_flags |= MSG_OOB;
		copy = 0;
	}

	/* Capture of data or error code/rejected data was successful */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	/* Send group flow control advertisement when applicable */
	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
		skb_queue_head_init(&xmitq);
		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
					  msg_orignode(hdr), msg_origport(hdr),
					  &xmitq);
		tipc_node_distr_xmit(sock_net(sk), &xmitq);
	}

	tsk_advance_rx_queue(sk);

	if (likely(!connected))
		goto exit;

	/* Send connection flow control advertisement when applicable */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
		tipc_sk_send_ack(tsk);
exit:
	release_sock(sk);
	return rc ? rc : copy;
}
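/* Example (sketch): with TIPC_ACK_RATE == 4 and an advertised receive
 * window of, say, 512 blocks, the reader above piggybacks a CONN_ACK only
 * once rcv_unacked reaches 512 / 4 = 128 blocks, i.e. roughly once per
 * quarter window instead of once per message.
 */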
/**
 * tipc_recvstream - receive stream-oriented data
 * @m: descriptor for message info
 * @buflen: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
			   size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct tipc_skb_cb *skb_cb;
	bool peek = flags & MSG_PEEK;
	int offset, required, copy, copied = 0;
	int hlen, dlen, err, rc;
	long timeout;

	/* Catch invalid receive attempts */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			break;
		skb = skb_peek(&sk->sk_receive_queue);
		skb_cb = TIPC_SKB_CB(skb);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);

		/* Discard any empty non-errored (SYN-) message */
		if (unlikely(!dlen && !err)) {
			tsk_advance_rx_queue(sk);
			continue;
		}

		/* Collect msg meta data, incl. error code and rejected data */
		if (!copied) {
			tipc_sk_set_orig_addr(m, skb);
			rc = tipc_sk_anc_data_recv(m, hdr, tsk);
			if (rc)
				break;
		}

		/* Copy data if msg ok, otherwise return error/partial data */
		if (likely(!err)) {
			offset = skb_cb->bytes_read;
			copy = min_t(int, dlen - offset, buflen - copied);
			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
			if (unlikely(rc))
				break;
			copied += copy;
			offset += copy;
			if (unlikely(offset < dlen)) {
				if (!peek)
					skb_cb->bytes_read = offset;
				break;
			}
		} else {
			rc = 0;
			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
				rc = -ECONNRESET;
			if (copied || rc)
				break;
		}

		if (unlikely(peek))
			break;

		tsk_advance_rx_queue(sk);

		/* Send connection flow control advertisement when applicable */
		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
			tipc_sk_send_ack(tsk);

		/* Exit if all requested data or FIN/error received */
		if (copied == buflen || err)
			break;

	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
exit:
	release_sock(sk);
	return copied ? copied : rc;
}
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
						EPOLLRDNORM | EPOLLRDBAND);
	rcu_read_unlock();
}

static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}
static void tipc_sk_proto_rcv(struct sock *sk,
			      struct sk_buff_head *inputq,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = __skb_dequeue(inputq);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_group *grp = tsk->group;
	bool wakeup = false;

	switch (msg_user(hdr)) {
	case CONN_MANAGER:
		tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
		return;
	case SOCK_WAKEUP:
		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
		tsk->cong_link_cnt--;
		wakeup = true;
		break;
	case GROUP_PROTOCOL:
		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
		break;
	case TOP_SRV:
		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
				      hdr, inputq, xmitq);
		break;
	default:
		break;
	}

	if (wakeup)
		sk->sk_write_space(sk);

	kfree_skb(skb);
}
/**
 * tipc_sk_filter_connect - Handle incoming message for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
 *
 * Returns true if everything ok, false otherwise
 */
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	u32 pport = msg_origport(hdr);
	u32 pnode = msg_orignode(hdr);

	if (unlikely(msg_mcast(hdr)))
		return false;

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
		/* Accept only ACK or NACK message */
		if (unlikely(!msg_connected(hdr))) {
			if (pport != tsk_peer_port(tsk) ||
			    pnode != tsk_peer_node(tsk))
				return false;
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			sk->sk_state_change(sk);
			return true;
		}

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			sk->sk_state_change(sk);
			return true;
		}

		if (unlikely(!msg_isdata(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = EINVAL;
			sk->sk_state_change(sk);
			return true;
		}

		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
		msg_set_importance(&tsk->phdr, msg_importance(hdr));

		/* If 'ACK+' message, add to socket receive queue */
		if (msg_data_sz(hdr))
			return true;

		/* If empty 'ACK-' message, wake up sleeping connect() */
		sk->sk_data_ready(sk);

		/* 'ACK-' message is neither accepted nor rejected: */
		msg_set_dest_droppable(hdr, 1);
		return false;

	case TIPC_OPEN:
	case TIPC_DISCONNECTING:
		break;
	case TIPC_LISTEN:
		/* Accept only SYN message */
		if (!msg_connected(hdr) && !(msg_errcode(hdr)))
			return true;
		break;
	case TIPC_ESTABLISHED:
		/* Accept only connection-based messages sent by peer */
		if (unlikely(!tsk_peer_msg(tsk, hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			/* Let timer expire on its own */
			tipc_node_remove_conn(net, tsk_peer_node(tsk),
					      tsk_peer_port(tsk));
			sk->sk_state_change(sk);
		}
		return true;
	default:
		pr_err("Unknown sk_state %u\n", sk->sk_state);
	}

	return false;
}
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
 * @skb: message
 *
 * For connection oriented messages, irrespective of importance,
 * default queue limit is 2 MB.
 *
 * For connectionless messages, queue limits are based on message
 * importance as follows:
 *
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(msg_in_group(hdr)))
		return sk->sk_rcvbuf;

	if (unlikely(!msg_connected(hdr)))
		return sk->sk_rcvbuf << msg_importance(hdr);

	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return sk->sk_rcvbuf;

	return FLOWCTL_MSG_LIM;
}
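/* Worked example (sketch, using the default 2 MB sk_rcvbuf mentioned in
 * the comment above):
 *
 *	group message                              -> 2 MB (sk_rcvbuf)
 *	connectionless, TIPC_LOW_IMPORTANCE        -> 2 MB << 0 =  2 MB
 *	connectionless, TIPC_CRITICAL_IMPORTANCE   -> 2 MB << 3 = 16 MB
 *	connected, block flow control peer         -> 2 MB (sk_rcvbuf)
 *	connected, legacy peer                     -> FLOWCTL_MSG_LIM messages
 */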
/**
 * tipc_sk_filter_rcv - validate incoming message
 * @sk: socket
 * @skb: pointer to message.
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 */
static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	bool sk_conn = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = buf_msg(skb);
	struct net *net = sock_net(sk);
	struct sk_buff_head inputq;
	int limit, err = TIPC_OK;

	TIPC_SKB_CB(skb)->bytes_read = 0;
	__skb_queue_head_init(&inputq);
	__skb_queue_tail(&inputq, skb);

	if (unlikely(!msg_isdata(hdr)))
		tipc_sk_proto_rcv(sk, &inputq, xmitq);

	if (unlikely(grp))
		tipc_group_filter_msg(grp, &inputq, xmitq);

	/* Validate and add to receive buffer if there is space */
	while ((skb = __skb_dequeue(&inputq))) {
		hdr = buf_msg(skb);
		limit = rcvbuf_limit(sk, skb);
		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
		    (!sk_conn && msg_connected(hdr)) ||
		    (!grp && msg_in_group(hdr)))
			err = TIPC_ERR_NO_PORT;
		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
			atomic_inc(&sk->sk_drops);
			err = TIPC_ERR_OVERLOAD;
		}

		if (unlikely(err)) {
			tipc_skb_reject(net, err, skb, xmitq);
			err = TIPC_OK;
			continue;
		}
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
		sk->sk_data_ready(sk);
	}
}
/**
 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @skb: message
 *
 * Caller must hold socket lock
 */
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	unsigned int before = sk_rmem_alloc_get(sk);
	struct sk_buff_head xmitq;
	unsigned int added;

	__skb_queue_head_init(&xmitq);

	tipc_sk_filter_rcv(sk, skb, &xmitq);
	added = sk_rmem_alloc_get(sk) - before;
	atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);

	/* Send pending response/rejected messages, if any */
	tipc_node_distr_xmit(sock_net(sk), &xmitq);

	return 0;
}
/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
			    u32 dport, struct sk_buff_head *xmitq)
{
	unsigned long time_limit = jiffies + 2;
	struct sk_buff *skb;
	unsigned int lim;
	atomic_t *dcnt;
	u32 onode;

	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return;

		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
			return;

		/* Add message directly to receive queue if possible */
		if (!sock_owned_by_user(sk)) {
			tipc_sk_filter_rcv(sk, skb, xmitq);
			continue;
		}

		/* Try backlog, compensating for double-counted bytes */
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (!sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim)))
			continue;

		/* Overload => reject message back to sender */
		onode = tipc_own_addr(sock_net(sk));
		atomic_inc(&sk->sk_drops);
		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
			__skb_queue_tail(xmitq, skb);
		break;
	}
}
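/* Worked example (sketch): while the owner processes its backlog, a
 * buffer's truesize is momentarily accounted both in sk_rmem_alloc and in
 * the backlog budget. tipc_sk_backlog_rcv() above accumulates exactly
 * that overlap in dupl_rcvcnt, and the limit used here is widened by the
 * same amount, so the double counting never triggers a spurious
 * TIPC_ERR_OVERLOAD rejection.
 */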
/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @net: the associated network namespace
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	struct sk_buff_head xmitq;
	u32 dnode, dport = 0;
	int err;
	struct tipc_sock *tsk;
	struct sock *sk;
	struct sk_buff *skb;

	__skb_queue_head_init(&xmitq);
	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

		if (likely(tsk)) {
			sk = &tsk->sk;
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
				spin_unlock_bh(&sk->sk_lock.slock);
			}
			/* Send pending response/rejected messages, if any */
			tipc_node_distr_xmit(sock_net(sk), &xmitq);
			sock_put(sk);
			continue;
		}
		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);
		if (!skb)
			return;

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))
			goto xmit;

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
			continue;
xmit:
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
	}
}

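/*
 * Concurrency note (explanatory comment, not in the original file): since
 * tipc_sk_rcv() may run concurrently in several contexts against the same
 * inputq, the spin_trylock_bh() above never blocks; a thread that loses the
 * race leaves the port's buffers in inputq, to be drained by whichever
 * thread currently holds the socket spinlock via tipc_sk_enqueue().
 */
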
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = sock->sk;
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		add_wait_queue(sk_sleep(sk), &wait);
		done = sk_wait_event(sk, timeo_p,
				     sk->sk_state != TIPC_CONNECTING, &wait);
		remove_wait_queue(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	int previous;
	int res = 0;

	if (destlen != sizeof(struct sockaddr_tipc))
		return -EINVAL;

	lock_sock(sk);

	if (tsk->group) {
		res = -EINVAL;
		goto exit;
	}

	if (dst->family == AF_UNSPEC) {
		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
		if (!tipc_sk_type_connectionless(sk))
			res = -EINVAL;
		goto exit;
	} else if (dst->family != AF_TIPC) {
		res = -EINVAL;
	}
	if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
		res = -EINVAL;
	if (res)
		goto exit;

	/* DGRAM/RDM connect(), just save the destaddr */
	if (tipc_sk_type_connectionless(sk)) {
		memcpy(&tsk->peer, dest, destlen);
		goto exit;
	}

	previous = sk->sk_state;

	switch (sk->sk_state) {
	case TIPC_OPEN:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered TIPC_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		/* fall through */
	case TIPC_CONNECTING:
		if (!timeout) {
			if (previous == TIPC_CONNECTING)
				res = -EALREADY;
			goto exit;
		}
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case TIPC_ESTABLISHED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
	}

exit:
	release_sock(sk);
	return res;
}

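/*
 * Example (illustrative only, not part of this file): a minimal user-space
 * sketch of the connect path above, addressing the peer by service name.
 * The service type 18888 and instance 17 are made-up values.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	int tipc_client(void)
 *	{
 *		struct sockaddr_tipc sa;
 *		int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.family = AF_TIPC;
 *		sa.addrtype = TIPC_ADDR_NAME;
 *		sa.addr.name.name.type = 18888;
 *		sa.addr.name.name.instance = 17;
 *		sa.addr.name.domain = 0;	// cluster-wide lookup
 *
 *		return connect(sd, (struct sockaddr *)&sa, sizeof(sa));
 *	}
 */
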
/**
 * tipc_listen - allow socket to listen for incoming connections
 * @sock: socket structure
 * @len: (unused)
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_listen(struct socket *sock, int len)
{
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);
	res = tipc_set_sk_state(sk, TIPC_LISTEN);
	release_sock(sk);

	return res;
}

static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 * @kern: caused by kernel or by userspace?
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sk->sk_state != TIPC_LISTEN) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
	if (res)
		goto exit;
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to its peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));

	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		__tipc_sendstream(new_sock, &m, 0);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}

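/*
 * Example (illustrative only, not part of this file): the listening-side
 * counterpart of tipc_listen()/tipc_accept(), binding a made-up service
 * range first.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	int tipc_server(void)
 *	{
 *		struct sockaddr_tipc sa;
 *		int lsd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.family = AF_TIPC;
 *		sa.addrtype = TIPC_ADDR_NAMESEQ;
 *		sa.scope = TIPC_CLUSTER_SCOPE;
 *		sa.addr.nameseq.type = 18888;	// made-up service type
 *		sa.addr.nameseq.lower = 17;
 *		sa.addr.nameseq.upper = 17;
 *
 *		if (bind(lsd, (struct sockaddr *)&sa, sizeof(sa)))
 *			return -1;
 *		if (listen(lsd, 5))
 *			return -1;
 *		return accept(lsd, NULL, NULL);
 *	}
 */
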
/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
	sk->sk_shutdown = SEND_SHUTDOWN;

	if (sk->sk_state == TIPC_DISCONNECTING) {
		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
	} else {
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}

static void tipc_sk_timeout(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 peer_port = tsk_peer_port(tsk);
	u32 peer_node = tsk_peer_node(tsk);
	u32 own_node = tsk_own_node(tsk);
	u32 own_port = tsk->portid;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;

	bh_lock_sock(sk);
	if (!tipc_sk_connected(sk))
		goto exit;

	/* Try again later if socket is busy */
	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
		goto exit;
	}

	if (tsk->probe_unacked) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(net, peer_node, peer_port);
		sk->sk_state_change(sk);
		goto exit;
	}
	/* Send new probe */
	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
			      peer_node, own_node, peer_port, own_port,
			      TIPC_OK);
	tsk->probe_unacked = true;
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
exit:
	bh_unlock_sock(sk);
	if (skb)
		tipc_node_xmit_skb(net, skb, peer_node, own_port);
	sock_put(sk);
}

static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct publication *publ;
	u32 key;

	if (scope != TIPC_NODE_SCOPE)
		scope = TIPC_CLUSTER_SCOPE;

	if (tipc_sk_connected(sk))
		return -EINVAL;
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->binding_sock, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}

static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	if (scope != TIPC_NODE_SCOPE)
		scope = TIPC_CLUSTER_SCOPE;

	list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
		if (seq) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				continue;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->upper, publ->key);
			rc = 0;
			break;
		}
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->upper, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}

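/*
 * Example (illustrative only, not part of this file): tipc_sk_publish() and
 * tipc_sk_withdraw() are driven from user space by bind(). The range below
 * is made up; binding with a zero-length address withdraws all names.
 *
 *	struct sockaddr_tipc sa = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_NODE_SCOPE,	// node-local visibility
 *	};
 *
 *	sa.addr.nameseq.type = 18888;		// made-up service type
 *	sa.addr.nameseq.lower = 0;
 *	sa.addr.nameseq.upper = 99;
 *	bind(sd, (struct sockaddr *)&sa, sizeof(sa));	// publish
 *	bind(sd, NULL, 0);				// withdraw all
 */
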
/* tipc_sk_reinit: set non-zero address in all existing sockets
 * when we go from standalone to network mode.
 */
void tipc_sk_reinit(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct rhashtable_iter iter;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	rhashtable_walk_enter(&tn->sk_rht, &iter);

	do {
		rhashtable_walk_start(&iter);

		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			msg = &tsk->phdr;
			msg_set_prevnode(msg, tipc_own_addr(net));
			msg_set_orignode(msg, tipc_own_addr(net));
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}

		rhashtable_walk_stop(&iter);
	} while (tsk == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}

static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	rcu_read_lock();
	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();

	return tsk;
}

static int tipc_sk_insert(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;

	while (remaining--) {
		portid++;
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		sock_hold(&tsk->sk);
		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
						   tsk_rht_params))
			return 0;
		sock_put(&tsk->sk);
	}

	return -1;
}

static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}

static const struct rhashtable_params tsk_rht_params = {
	.nelem_hint = 192,
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
};

int tipc_sk_rht_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
}

void tipc_sk_rht_destroy(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */
	synchronize_net();

	rhashtable_destroy(&tn->sk_rht);
}

static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
{
	struct net *net = sock_net(&tsk->sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq seq;
	int rc;

	if (mreq->type < TIPC_RESERVED_TYPES)
		return -EACCES;
	if (mreq->scope > TIPC_NODE_SCOPE)
		return -EINVAL;
	if (grp)
		return -EACCES;
	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
	if (!grp)
		return -ENOMEM;
	tsk->group = grp;
	msg_set_lookup_scope(hdr, mreq->scope);
	msg_set_nametype(hdr, mreq->type);
	msg_set_dest_droppable(hdr, true);
	seq.type = mreq->type;
	seq.lower = mreq->instance;
	seq.upper = seq.lower;
	tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
	rc = tipc_sk_publish(tsk, mreq->scope, &seq);
	if (rc) {
		tipc_group_delete(net, grp);
		tsk->group = NULL;
		return rc;
	}
	/* Eliminate any risk that a broadcast overtakes sent JOINs */
	tsk->mc_method.rcast = true;
	tsk->mc_method.mandatory = true;
	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
	return rc;
}

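/*
 * Example (illustrative only, not part of this file): joining a
 * communication group from user space via setsockopt(); cf. tipc_sk_join()
 * above. The group and member identities are made up.
 *
 *	struct tipc_group_req mreq = {
 *		.type = 18888,			// group identity
 *		.instance = 42,			// member identity
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.flags = TIPC_GROUP_MEMBER_EVTS,// deliver join/leave events
 *	};
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &mreq, sizeof(mreq));
 *	...
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_LEAVE, NULL, 0);
 */
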
static int tipc_sk_leave(struct tipc_sock *tsk)
{
	struct net *net = sock_net(&tsk->sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_name_seq seq;
	int scope;

	if (!grp)
		return -EINVAL;
	tipc_group_self(grp, &seq, &scope);
	tipc_group_delete(net, grp);
	tsk->group = NULL;
	tipc_sk_withdraw(tsk, scope, &seq);
	return 0;
}

/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group_req mreq;
	u32 value = 0;
	int res = 0;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;

	switch (opt) {
	case TIPC_IMPORTANCE:
	case TIPC_SRC_DROPPABLE:
	case TIPC_DEST_DROPPABLE:
	case TIPC_CONN_TIMEOUT:
		if (ol < sizeof(value))
			return -EINVAL;
		if (get_user(value, (u32 __user *)ov))
			return -EFAULT;
		break;
	case TIPC_GROUP_JOIN:
		if (ol < sizeof(mreq))
			return -EINVAL;
		if (copy_from_user(&mreq, ov, sizeof(mreq)))
			return -EFAULT;
		break;
	default:
		if (ov || ol)
			return -EINVAL;
	}

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tsk_set_importance(tsk, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		break;
	case TIPC_MCAST_BROADCAST:
		tsk->mc_method.rcast = false;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_MCAST_REPLICAST:
		tsk->mc_method.rcast = true;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_GROUP_JOIN:
		res = tipc_sk_join(tsk, &mreq);
		break;
	case TIPC_GROUP_LEAVE:
		res = tipc_sk_leave(tsk);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}

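/*
 * Example (illustrative only, not part of this file): the u32-valued
 * options above take a plain 32-bit value; TIPC_CONN_TIMEOUT is in
 * milliseconds.
 *
 *	__u32 tmo = 10000;	// 10 s connect timeout
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */
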
/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP
 * options (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_name_seq seq;
	int len, scope;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	case TIPC_GROUP_JOIN:
		seq.type = 0;
		if (tsk->group)
			tipc_group_self(tsk->group, &seq, &scope);
		value = seq.type;
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}

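/*
 * Example (illustrative only, not part of this file): reading back an
 * option from user space.
 *
 *	__u32 depth;
 *	socklen_t len = sizeof(depth);
 *
 *	if (!getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len))
 *		printf("%u buffers queued\n", depth);
 */
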
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct net *net = sock_net(sock->sk);
	struct tipc_sioc_nodeid_req nr = {0};
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		if (!tipc_node_get_linkname(net,
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	case SIOCGETNODEID:
		if (copy_from_user(&nr, argp, sizeof(nr)))
			return -EFAULT;
		if (!tipc_node_get_id(net, nr.peer, nr.node_id))
			return -EADDRNOTAVAIL;
		if (copy_to_user(argp, &nr, sizeof(nr)))
			return -EFAULT;
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}

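/*
 * Example (illustrative only, not part of this file): querying the name of
 * link 0 towards a peer node; 'peer' would come from e.g. a received
 * sockaddr_tipc.
 *
 *	struct tipc_sioc_ln_req lnr = {
 *		.peer = peer,
 *		.bearer_id = 0,
 *	};
 *
 *	if (!ioctl(sd, SIOCGETLINKNAME, &lnr))
 *		printf("link: %s\n", lnr.linkname);
 */
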
static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
{
	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
	u32 onode = tipc_own_addr(sock_net(sock1->sk));

	tsk1->peer.family = AF_TIPC;
	tsk1->peer.addrtype = TIPC_ADDR_ID;
	tsk1->peer.scope = TIPC_NODE_SCOPE;
	tsk1->peer.addr.id.ref = tsk2->portid;
	tsk1->peer.addr.id.node = onode;
	tsk2->peer.family = AF_TIPC;
	tsk2->peer.addrtype = TIPC_ADDR_ID;
	tsk2->peer.scope = TIPC_NODE_SCOPE;
	tsk2->peer.addr.id.ref = tsk1->portid;
	tsk2->peer.addr.id.node = onode;

	tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
	tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
	return 0;
}

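/*
 * Example (illustrative only, not part of this file): tipc_socketpair()
 * backs the ordinary socketpair(2) call, yielding two connected node-local
 * sockets.
 *
 *	int sv[2];
 *
 *	if (!socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv))
 *		write(sv[0], "ping", 4);	// readable on sv[1]
 */
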
/* Protocol switches for the various types of TIPC sockets */

static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendstream,
	.recvmsg	= tipc_recvstream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};

static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};

/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_socket_init(void)
{
	int res;

	res = proto_register(&tipc_proto, 1);
	if (res) {
		pr_err("Failed to register TIPC protocol type\n");
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
		goto out;
	}
 out:
	return res;
}

/**
 * tipc_socket_stop - stop TIPC socket interface
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
				 *tsk)
{
	struct net *net = sock_net(skb->sk);
	struct sock *sk = &tsk->sk;

	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
		return -EMSGSIZE;

	if (tipc_sk_connected(sk)) {
		if (__tipc_nl_add_sk_con(skb, tsk))
			return -EMSGSIZE;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			return -EMSGSIZE;
	}
	return 0;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;

	if (__tipc_nl_add_sk_info(skb, tsk))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
		    int (*skb_handler)(struct sk_buff *skb,
				       struct netlink_callback *cb,
				       struct tipc_sock *tsk))
{
	struct rhashtable_iter *iter = (void *)cb->args[4];
	struct tipc_sock *tsk;
	int err;

	rhashtable_walk_start(iter);
	while ((tsk = rhashtable_walk_next(iter)) != NULL) {
		if (IS_ERR(tsk)) {
			err = PTR_ERR(tsk);
			if (err == -EAGAIN) {
				err = 0;
				continue;
			}
			break;
		}

		sock_hold(&tsk->sk);
		rhashtable_walk_stop(iter);
		lock_sock(&tsk->sk);
		err = skb_handler(skb, cb, tsk);
		if (err) {
			release_sock(&tsk->sk);
			sock_put(&tsk->sk);
			goto out;
		}
		release_sock(&tsk->sk);
		rhashtable_walk_start(iter);
		sock_put(&tsk->sk);
	}
out:
	rhashtable_walk_stop(iter);

	return skb->len;
}
EXPORT_SYMBOL(tipc_nl_sk_walk);

int tipc_dump_start(struct netlink_callback *cb)
{
	return __tipc_dump_start(cb, sock_net(cb->skb->sk));
}
EXPORT_SYMBOL(tipc_dump_start);

int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
{
	/* tipc_nl_name_table_dump() uses cb->args[0...3]. */
	struct rhashtable_iter *iter = (void *)cb->args[4];
	struct tipc_net *tn = tipc_net(net);

	if (!iter) {
		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;

		cb->args[4] = (long)iter;
	}

	rhashtable_walk_enter(&tn->sk_rht, iter);

	return 0;
}

int tipc_dump_done(struct netlink_callback *cb)
{
	struct rhashtable_iter *hti = (void *)cb->args[4];

	rhashtable_walk_exit(hti);
	kfree(hti);

	return 0;
}
EXPORT_SYMBOL(tipc_dump_done);

int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tipc_sock *tsk, u32 sk_filter_state,
			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
{
	struct sock *sk = &tsk->sk;
	struct nlattr *attrs;
	struct nlattr *stat;

	/* Filter response w.r.t. sk_state */
	if (!(sk_filter_state & (1 << sk->sk_state)))
		return 0;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto msg_cancel;

	if (__tipc_nl_add_sk_info(skb, tsk))
		goto attr_msg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
					 sock_i_uid(sk))) ||
	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
			      tipc_diag_gen_cookie(sk),
			      TIPC_NLA_SOCK_PAD))
		goto attr_msg_cancel;

	stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT);
	if (!stat)
		goto attr_msg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
			skb_queue_len(&sk->sk_receive_queue)) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
			skb_queue_len(&sk->sk_write_queue)) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
			atomic_read(&sk->sk_drops)))
		goto stat_msg_cancel;

	if (tsk->cong_link_cnt &&
	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
		goto stat_msg_cancel;

	if (tsk_conn_cong(tsk) &&
	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
		goto stat_msg_cancel;

	nla_nest_end(skb, stat);

	if (tsk->group)
		if (tipc_group_fill_sock_diag(tsk->group, skb))
			goto stat_msg_cancel;

	nla_nest_end(skb, attrs);

	return 0;

stat_msg_cancel:
	nla_nest_cancel(skb, stat);
attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
msg_cancel:
	return -EMSGSIZE;
}
EXPORT_SYMBOL(tipc_sk_fill_sock_diag);

int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &tsk->publications, binding_sock) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent(),
			 * so setting prev_seq here will cause the consistency
			 * check to fail in the netlink callback handler,
			 * resulting in the last NLMSG_DONE message having the
			 * NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     binding_sock);
	}

	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}

int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy, NULL);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}