/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"
#include "trace.h"
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4       /* ACK at 1/4 of rcv window size */

enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};
struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @blocking_link: address of the congested link we are currently sleeping on
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	atomic_t dupl_rcvcnt;
	u32 conn_timeout;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	bool group_is_open;
};
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tsk->phdr, (u32)imp);
	return 0;
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len.
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ.
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by the peer we
 *   fall back to message based flow control, incrementing the counter
 *   by one per message.
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
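
/* Worked example (illustrative, not part of the original file): assuming
 * FLOWCTL_BLK_SZ is 1024 bytes, as defined in msg.h, a 4000 byte message
 * counts as tsk_inc() == 4000 / 1024 + 1 == 4 blocks against the send
 * window when the peer supports TIPC_BLOCK_FLOWCTL, but as exactly 1
 * otherwise (one message, one unit). A 64 KB receive buffer advertises
 * tsk_adv_blocks(65536) == 65536 / 1024 / 4 == 16 blocks; the division
 * by 4 absorbs the worst-case truesize(len)/len ratio noted above.
 */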
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond(): send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}
static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is a datagram socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	u32 self = tipc_own_addr(sock_net(sk));
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node, peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && peer_node == self)
		return true;

	if (!peer_node && orig_node == self)
		return true;

	return false;
}
/* tipc_set_sk_state - set the sk_state of the socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_OPEN:
		res = 0;
		break;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}
static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}
#define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
({									       \
	DEFINE_WAIT_FUNC(wait_, woken_wake_function);			       \
	struct sock *sk_;						       \
	int rc_;							       \
									       \
	while ((rc_ = !(condition_))) {					       \
		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */	       \
		smp_rmb();						       \
		sk_ = (sock_)->sk;					       \
		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
		if (rc_)						       \
			break;						       \
		add_wait_queue(sk_sleep(sk_), &wait_);			       \
		release_sock(sk_);					       \
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep();					       \
		lock_sock(sk_);						       \
		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
	}								       \
	rc_;								       \
})
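
/* Usage sketch (illustrative): callers hold the socket lock and pass a
 * condition that is re-evaluated after every wakeup, e.g. as done in
 * tipc_sendmcast() below:
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (unlikely(rc))
 *		return rc;
 *
 * The macro evaluates to 0 once the condition holds, or to the errno
 * produced by tipc_sk_sock_err() (e.g. -EAGAIN on timeout, -EPIPE on a
 * broken connection) while waiting.
 */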
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	tsk->group_is_open = true;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}
	__skb_queue_head_init(&tsk->mc_method.deferredq);
	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
	return 0;
}
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}
/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = CONN_TIMEOUT_DEFAULT;
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Remove any pending SYN message */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}
/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	__skb_queue_purge(&tsk->mc_method.deferredq);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}
/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding are indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 *       access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}
	if (tsk->group) {
		res = -EACCES;
		goto exit;
	}
	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope >= 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}
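
/* Userspace sketch (illustrative, not part of this file): binding a TIPC
 * service name range to a socket. Type 18888 and the instance range are
 * made-up example values:
 *
 *	struct sockaddr_tipc addr = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 *	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)))
 *		perror("bind");
 *
 * Per the kerneldoc above, the same call with a negative scope withdraws
 * the binding, and a zero-length address unbinds all names.
 */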
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
 *       a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}
/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits. TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	sock_poll_wait(file, sock, wait);
	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		/* fall through */
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}
	return revents;
}
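
/* Usage sketch (illustrative): because the mask can be stale the moment it
 * is returned, userspace treats it as a hint only, e.g. with poll(2):
 *
 *	struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		recv(sd, buf, sizeof(buf), MSG_DONTWAIT);
 *
 * Passing MSG_DONTWAIT guards against the race window the IMPORTANT note
 * above describes.
 */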
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	if (tsk->group)
		return -EACCES;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen)) {
		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
					TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);
	}

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}
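
/* Userspace sketch (illustrative): the multicast path above is reached by
 * sending to a TIPC_ADDR_MCAST address; type 18888 and the instance range
 * are made-up example values:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * Every socket bound to an overlapping instance range of that type
 * receives its own copy of the message.
 */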
/**
 * tipc_send_group_msg - send a message to a member in the group
 * @net: network namespace
 * @m: message to send
 * @mb: group member
 * @dnode: destination node
 * @dport: destination port
 * @dlen: total length of message data
 */
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
			       struct msghdr *m, struct tipc_member *mb,
			       u32 dnode, u32 dport, int dlen)
{
	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
	struct tipc_mc_method *method = &tsk->mc_method;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct sk_buff_head pkts;
	int mtu, rc;

	/* Complete message header */
	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, dport);
	msg_set_destnode(hdr, dnode);
	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(&tsk->cong_links, dnode, 0);
		tsk->cong_link_cnt++;
	}

	/* Update send window */
	tipc_group_update_member(mb, blks);

	/* A broadcast sent within next EXPIRE period must follow same path */
	method->rcast = true;
	method->mandatory = true;
	return 0;
}
/**
 * tipc_send_group_unicast - send message to a member in the group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_member *mb = NULL;
	u32 node, port;
	int rc;

	node = dest->addr.id.node;
	port = dest->addr.id.ref;
	if (!port && !node)
		return -EHOSTUNREACH;

	/* Block or return if destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(&tsk->cong_links, node, 0) &&
				tsk->group &&
				!tipc_group_cong(tsk->group, node, port, blks,
						 &mb));
	if (unlikely(rc))
		return rc;

	if (unlikely(!mb))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);

	return rc ? rc : dlen;
}
/**
 * tipc_send_group_anycast - send message to any member with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct list_head *cong_links = &tsk->cong_links;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_member *first = NULL;
	struct tipc_member *mbr = NULL;
	struct net *net = sock_net(sk);
	u32 node, port, exclude;
	struct list_head dsts;
	u32 type, inst, scope;
	int lookups = 0;
	int dstcnt, rc;
	bool cong;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);

	while (++lookups < 4) {
		exclude = tipc_group_exclude(tsk->group);

		first = NULL;

		/* Look for a non-congested destination member, if any */
		while (1) {
			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
						 &dstcnt, exclude, false))
				return -EHOSTUNREACH;
			tipc_dest_pop(&dsts, &node, &port);
			cong = tipc_group_cong(tsk->group, node, port, blks,
					       &mbr);
			if (!cong)
				break;
			if (mbr == first)
				break;
			if (!first)
				first = mbr;
		}

		/* Start over if destination was not in member list */
		if (unlikely(!mbr))
			continue;

		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
			break;

		/* Block or return if destination link or member is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					!tipc_dest_find(cong_links, node, 0) &&
					tsk->group &&
					!tipc_group_cong(tsk->group, node, port,
							 blks, &mbr));
		if (unlikely(rc))
			return rc;

		/* Send, unless destination disappeared while waiting */
		if (likely(mbr))
			break;
	}

	if (unlikely(lookups >= 4))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

	return rc ? rc : dlen;
}
/**
 * tipc_send_group_bcast - send message to all members in communication group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_nlist *dsts;
	struct tipc_mc_method *method = &tsk->mc_method;
	bool ack = method->mandatory && method->rcast;
	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	int rc = -EHOSTUNREACH;

	/* Block or return if any destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tsk->cong_link_cnt && tsk->group &&
				!tipc_group_bc_cong(tsk->group, blks));
	if (unlikely(rc))
		return rc;

	dsts = tipc_group_dests(tsk->group);
	if (!dsts->local && !dsts->remote)
		return -EHOSTUNREACH;

	/* Complete message header */
	if (dest) {
		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
		msg_set_nameinst(hdr, dest->addr.name.name.instance);
	} else {
		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
		msg_set_nameinst(hdr, 0);
	}
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));

	/* Avoid getting stuck with repeated forced replicasts */
	msg_set_grp_bc_ack_req(hdr, ack);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Update broadcast sequence number and send windows */
	tipc_group_update_bc_members(tsk->group, blks, ack);

	/* Broadcast link is now free to choose method for next broadcast */
	method->mandatory = false;
	method->expires = jiffies;

	return dlen;
}
/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	u32 type, inst, scope, exclude;
	struct list_head dsts;
	u32 dstcnt;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
				 &dstcnt, exclude, true))
		return -EHOSTUNREACH;

	if (dstcnt == 1) {
		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
		return tipc_send_group_unicast(sock, m, dlen, timeout);
	}

	tipc_dest_list_purge(&dsts);
	return tipc_send_group_bcast(sock, m, dlen, timeout);
}
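
/* Userspace sketch (illustrative): the group send variants above apply to
 * sockets that have joined a communication group via setsockopt(); group
 * type 4711 and member instance 17 are made-up example values:
 *
 *	struct tipc_group_req mreq = {
 *		.type = 4711,
 *		.instance = 17,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *	};
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &mreq, sizeof(mreq));
 *
 * After the join, a plain send()/sendmsg() with no address broadcasts to
 * the whole group, while sendto() with a name, socket, or mcast address
 * selects anycast, unicast, or multicast, as dispatched in
 * __tipc_sendmsg() below.
 */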
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	u32 type, lower, upper, scope;
	struct sk_buff *skb, *_skb;
	u32 portid, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	int user, mtyp, hlen;
	bool exact;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		onode = msg_orignode(hdr);
		type = msg_nametype(hdr);

		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			kfree_skb(skb);
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			lower = 0;
			upper = ~0;
			scope = msg_lookup_scope(hdr);
			exact = true;
		} else {
			/* TIPC_NODE_SCOPE means "any scope" in this context */
			if (onode == self)
				scope = TIPC_NODE_SCOPE;
			else
				scope = TIPC_CLUSTER_SCOPE;
			exact = false;
			lower = msg_namelower(hdr);
			upper = msg_nameupper(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_mc_lookup(net, type, lower, upper,
				       scope, exact, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}
/**
 * tipc_sk_conn_proto_rcv - receive a connection management protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr)) {
		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
		goto exit;
	}

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);

		/* State change is ignored if socket already awake;
		 * convert msg to abort msg and add to inqueue
		 */
		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
		msg_set_type(hdr, TIPC_CONN_MSG);
		msg_set_size(hdr, BASIC_H_SIZE);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
		__skb_queue_tail(inputq, skb);
		return;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}
/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 dport, dnode = 0;
	u32 type, inst;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (likely(dest)) {
		if (unlikely(m->msg_namelen < sizeof(*dest)))
			return -EINVAL;
		if (unlikely(dest->family != AF_TIPC))
			return -EINVAL;
	}

	if (grp) {
		if (!dest)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_NAME)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_ID)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_MCAST)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn && dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
		msg_set_syn(hdr, 1);
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		dnode = dest->addr.name.domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	} else {
		return -EINVAL;
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, dnode, 0));
	if (unlikely(rc))
		return rc;

	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;
	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
		return -ENOMEM;

	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(clinks, dnode, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}
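
/* Userspace sketch (illustrative): a connectionless send to a service
 * address, matching the TIPC_ADDR_NAME branch above; type 18888 and
 * instance 42 are made-up example values:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 18888, .instance = 42 },
 *		.addr.name.domain = 0,	// 0 => cluster-wide lookup
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * The name is translated to a port via tipc_nametbl_translate(); if no
 * publication matches, the call fails with EHOSTUNREACH, mirroring the
 * -EHOSTUNREACH return above.
 */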
/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendstream(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && dlen == rc) {
			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		}
		return rc;
	}

	do {
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
					 TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}
/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_sendstream(sock, m, dsz);
}
/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_syn(msg, 0);
	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	__skb_queue_purge(&sk->sk_write_queue);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}
/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @hdr: received message header
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
	struct tipc_msg *hdr = buf_msg(skb);

	if (!srcaddr)
		return;

	srcaddr->sock.family = AF_TIPC;
	srcaddr->sock.addrtype = TIPC_ADDR_ID;
	srcaddr->sock.scope = 0;
	srcaddr->sock.addr.id.ref = msg_origport(hdr);
	srcaddr->sock.addr.id.node = msg_orignode(hdr);
	srcaddr->sock.addr.name.domain = 0;
	m->msg_namelen = sizeof(struct sockaddr_tipc);

	if (!msg_in_group(hdr))
		return;

	/* Group message users may also want to know sending member's id */
	srcaddr->member.family = AF_TIPC;
	srcaddr->member.addrtype = TIPC_ADDR_NAME;
	srcaddr->member.scope = 0;
	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
	srcaddr->member.addr.name.domain = 0;
	m->msg_namelen = sizeof(*srcaddr);
}
/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
				 struct tipc_sock *tsk)
{
	struct tipc_msg *msg;
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;
	msg = buf_msg(skb);

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			if (skb_linearize(skb))
				return -ENOMEM;
			msg = buf_msg(skb);
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
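
/* Userspace sketch (illustrative): reading the ancillary objects set up
 * above with recvmsg(). Note that nothing is captured unless the caller
 * supplies a control buffer (the msg_controllen check above):
 *
 *	char cbuf[CMSG_SPACE(12)];
 *	struct msghdr m = { .msg_control = cbuf,
 *			    .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm;
 *
 *	recvmsg(sd, &m, 0);
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
 *		if (cm->cmsg_level != SOL_TIPC)
 *			continue;
 *		if (cm->cmsg_type == TIPC_DESTNAME) {
 *			u32 *name = (u32 *)CMSG_DATA(cm);
 *			// name[0]=type, name[1]=lower, name[2]=upper
 *		} else if (cm->cmsg_type == TIPC_ERRINFO) {
 *			u32 *ei = (u32 *)CMSG_DATA(cm);
 *			// ei[0]=error code, ei[1]=len of returned data
 *		}
 *	}
 */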
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			add_wait_queue(sk_sleep(sk), &wait);
			release_sock(sk);
			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
			sched_annotate_sleep();
			lock_sock(sk);
			remove_wait_queue(sk_sleep(sk), &wait);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = sock_error(sk);
		if (err)
			break;
	}
	*timeop = timeo;
	return err;
}
/**
 * tipc_recvmsg - receive packet-oriented message
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	bool connected = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	int rc, err, hlen, dlen, copy;
	struct sk_buff_head xmitq;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool grp_evt;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Step rcv queue to first msg with data or error; wait if necessary */
	do {
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		grp_evt = msg_is_grp_evt(hdr);
		if (likely(dlen || err))
			break;
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	tipc_sk_set_orig_addr(m, skb);
	rc = tipc_sk_anc_data_recv(m, skb, tsk);
	if (unlikely(rc))
		goto exit;
	hdr = buf_msg(skb);

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		copy = min_t(int, dlen, buflen);
		if (unlikely(copy != dlen))
			m->msg_flags |= MSG_TRUNC;
		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
	} else {
		copy = 0;
		rc = 0;
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
			rc = -ECONNRESET;
	}
	if (unlikely(rc))
		goto exit;

	/* Mark message as group event if applicable */
	if (unlikely(grp_evt)) {
		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
			m->msg_flags |= MSG_EOR;
		m->msg_flags |= MSG_OOB;
		copy = 0;
	}

	/* Capture of data or error code/rejected data was successful */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	/* Send group flow control advertisement when applicable */
	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
		skb_queue_head_init(&xmitq);
		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
					  msg_orignode(hdr), msg_origport(hdr),
					  &xmitq);
		tipc_node_distr_xmit(sock_net(sk), &xmitq);
	}

	tsk_advance_rx_queue(sk);

	if (likely(!connected))
		goto exit;

	/* Send connection flow control advertisement when applicable */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
		tipc_sk_send_ack(tsk);
exit:
	release_sock(sk);
	return rc ? rc : copy;
}
/**
 * tipc_recvstream - receive stream-oriented data
 * @m: descriptor for message info
 * @buflen: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available,
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
			   size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct tipc_skb_cb *skb_cb;
	bool peek = flags & MSG_PEEK;
	int offset, required, copy, copied = 0;
	int hlen, dlen, err, rc;
	long timeout;

	/* Catch invalid receive attempts */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			break;
		skb = skb_peek(&sk->sk_receive_queue);
		skb_cb = TIPC_SKB_CB(skb);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);

		/* Discard any empty non-errored (SYN-) message */
		if (unlikely(!dlen && !err)) {
			tsk_advance_rx_queue(sk);
			continue;
		}

		/* Collect msg meta data, incl. error code and rejected data */
		if (!copied) {
			tipc_sk_set_orig_addr(m, skb);
			rc = tipc_sk_anc_data_recv(m, skb, tsk);
			if (rc)
				break;
			hdr = buf_msg(skb);
		}

		/* Copy data if msg ok, otherwise return error/partial data */
		if (likely(!err)) {
			offset = skb_cb->bytes_read;
			copy = min_t(int, dlen - offset, buflen - copied);
			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
			if (unlikely(rc))
				break;
			copied += copy;
			offset += copy;
			if (unlikely(offset < dlen)) {
				if (!peek)
					skb_cb->bytes_read = offset;
				break;
			}
		} else {
			rc = 0;
			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
				rc = -ECONNRESET;
			if (copied || rc)
				break;
		}

		if (unlikely(peek))
			break;

		tsk_advance_rx_queue(sk);

		/* Send connection flow control advertisement when applicable */
		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
			tipc_sk_send_ack(tsk);

		/* Exit if all requested data or FIN/error received */
		if (copied == buflen || err)
			break;

	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
exit:
	release_sock(sk);
	return copied ? copied : rc;
}
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
						EPOLLRDNORM | EPOLLRDBAND);
	rcu_read_unlock();
}

static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}
static void tipc_sk_proto_rcv(struct sock *sk,
			      struct sk_buff_head *inputq,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = __skb_dequeue(inputq);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_group *grp = tsk->group;
	bool wakeup = false;

	switch (msg_user(hdr)) {
	case CONN_MANAGER:
		tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
		return;
	case SOCK_WAKEUP:
		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
		/* coupled with smp_rmb() in tipc_wait_for_cond() */
		smp_wmb();
		tsk->cong_link_cnt--;
		wakeup = true;
		break;
	case GROUP_PROTOCOL:
		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
		break;
	case TOP_SRV:
		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
				      hdr, inputq, xmitq);
		break;
	default:
		break;
	}

	if (wakeup)
		sk->sk_write_space(sk);

	kfree_skb(skb);
}
/**
 * tipc_sk_filter_connect - check incoming message for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer.
 * Returns true if message should be added to receive queue, false otherwise
 */
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	bool con_msg = msg_connected(hdr);
	u32 pport = tsk_peer_port(tsk);
	u32 pnode = tsk_peer_node(tsk);
	u32 oport = msg_origport(hdr);
	u32 onode = msg_orignode(hdr);
	int err = msg_errcode(hdr);
	unsigned long delay;

	if (unlikely(msg_mcast(hdr)))
		return false;

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
		/* Setup ACK */
		if (likely(con_msg)) {
			if (err)
				break;
			tipc_sk_finish_conn(tsk, oport, onode);
			msg_set_importance(&tsk->phdr, msg_importance(hdr));
			/* ACK+ message with data is added to receive queue */
			if (msg_data_sz(hdr))
				return true;
			/* Empty ACK-, - wake up sleeping connect() and drop */
			sk->sk_state_change(sk);
			msg_set_dest_droppable(hdr, 1);
			return false;
		}
		/* Ignore connectionless message if not from listening socket */
		if (oport != pport || onode != pnode)
			return false;

		/* Rejected SYN */
		if (err != TIPC_ERR_OVERLOAD)
			break;

		/* Prepare for new setup attempt if we have a SYN clone */
		if (skb_queue_empty(&sk->sk_write_queue))
			break;
		get_random_bytes(&delay, 2);
		delay %= (tsk->conn_timeout / 4);
		delay = msecs_to_jiffies(delay + 100);
		sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
		return false;
	case TIPC_OPEN:
	case TIPC_DISCONNECTING:
		return false;
	case TIPC_LISTEN:
		/* Accept only SYN message */
		if (!msg_is_syn(hdr) &&
		    tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
			return false;
		if (!con_msg && !err)
			return true;
		return false;
	case TIPC_ESTABLISHED:
		/* Accept only connection-based messages sent by peer */
		if (likely(con_msg && !err && pport == oport && pnode == onode))
			return true;
		if (!tsk_peer_msg(tsk, hdr))
			return false;
		if (!err)
			return true;
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(net, pnode, tsk->portid);
		sk->sk_state_change(sk);
		return true;
	default:
		pr_err("Unknown sk_state %u\n", sk->sk_state);
	}
	/* Abort connection setup attempt */
	tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	sk->sk_err = ECONNREFUSED;
	sk->sk_state_change(sk);
	return true;
}
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 *
 * For connection oriented messages, irrespective of importance,
 * default queue limit is 2 MB.
 *
 * For connectionless messages, queue limits are based on message
 * importance as follows:
 *
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(msg_in_group(hdr)))
		return sk->sk_rcvbuf;

	if (unlikely(!msg_connected(hdr)))
		return sk->sk_rcvbuf << msg_importance(hdr);

	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return sk->sk_rcvbuf;

	return FLOWCTL_MSG_LIM;
}
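
/* Worked example (illustrative): assuming the default sk_rcvbuf of 2 MB
 * (sysctl_tipc_rmem[1], see tipc_sk_create()), a connectionless message
 * of TIPC_CRITICAL_IMPORTANCE (importance value 3) gets a queue limit of
 * 2 MB << 3 == 16 MB, matching the table above, while group traffic and
 * block-flow-controlled connection traffic stay at sk_rcvbuf itself.
 */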
/**
 * tipc_sk_filter_rcv - validate incoming message
 * @skb: pointer to message.
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 */
static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	bool sk_conn = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = buf_msg(skb);
	struct net *net = sock_net(sk);
	struct sk_buff_head inputq;
	int mtyp = msg_type(hdr);
	int limit, err = TIPC_OK;

	trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
	TIPC_SKB_CB(skb)->bytes_read = 0;
	__skb_queue_head_init(&inputq);
	__skb_queue_tail(&inputq, skb);

	if (unlikely(!msg_isdata(hdr)))
		tipc_sk_proto_rcv(sk, &inputq, xmitq);

	if (unlikely(grp))
		tipc_group_filter_msg(grp, &inputq, xmitq);

	if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
		tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);

	/* Validate and add to receive buffer if there is space */
	while ((skb = __skb_dequeue(&inputq))) {
		hdr = buf_msg(skb);
		limit = rcvbuf_limit(sk, skb);
		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
		    (!sk_conn && msg_connected(hdr)) ||
		    (!grp && msg_in_group(hdr)))
			err = TIPC_ERR_NO_PORT;
		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
			trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
					   "err_overload2!");
			atomic_inc(&sk->sk_drops);
			err = TIPC_ERR_OVERLOAD;
		}

		if (unlikely(err)) {
			if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
				trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
						      "@filter_rcv!");
				__skb_queue_tail(xmitq, skb);
			}
			err = TIPC_OK;
			continue;
		}
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
		trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
					 "rcvq >90% allocated!");
		sk->sk_data_ready(sk);
	}
}
2203 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2207 * Caller must hold socket lock
2209 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2211 unsigned int before = sk_rmem_alloc_get(sk);
2212 struct sk_buff_head xmitq;
2215 __skb_queue_head_init(&xmitq);
2217 tipc_sk_filter_rcv(sk, skb, &xmitq);
2218 added = sk_rmem_alloc_get(sk) - before;
2219 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2221 /* Send pending response/rejected messages, if any */
2222 tipc_node_distr_xmit(sock_net(sk), &xmitq);
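/* A sketch of the compensation arithmetic above (numbers illustrative):
 * a 4 KB-truesize skb moved from the backlog into sk_receive_queue is
 * momentarily charged to both queues, so 4 KB is added to dupl_rcvcnt;
 * tipc_sk_enqueue() later widens its backlog limit by the same amount:
 *
 *	lim = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
 */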
2227 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2228 * inputq and try adding them to socket or backlog queue
2229 * @inputq: list of incoming buffers with potentially different destinations
2230 * @sk: socket where the buffers should be enqueued
2231 * @dport: port number for the socket
2233 * Caller must hold socket lock
2235 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2236 u32 dport, struct sk_buff_head *xmitq)
2238 unsigned long time_limit = jiffies + 2;
2239 struct sk_buff *skb;
2244 while (skb_queue_len(inputq)) {
2245 if (unlikely(time_after_eq(jiffies, time_limit)))
2248 skb = tipc_skb_dequeue(inputq, dport);
2252 /* Add message directly to receive queue if possible */
2253 if (!sock_owned_by_user(sk)) {
2254 tipc_sk_filter_rcv(sk, skb, xmitq);
2258 /* Try backlog, compensating for double-counted bytes */
2259 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2260 if (!sk->sk_backlog.len)
2261 atomic_set(dcnt, 0);
2262 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2263 if (likely(!sk_add_backlog(sk, skb, lim))) {
2264 trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
2265 "bklg & rcvq >90% allocated!");
2269 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
2270 /* Overload => reject message back to sender */
2271 onode = tipc_own_addr(sock_net(sk));
2272 atomic_inc(&sk->sk_drops);
2273 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
2274 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
2276 __skb_queue_tail(xmitq, skb);
2283 * tipc_sk_rcv - handle a chain of incoming buffers
2284 * @inputq: buffer list containing the buffers
2285 * Consumes all buffers in list until inputq is empty
2286 * Note: may be called in multiple threads referring to the same queue
2288 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2290 struct sk_buff_head xmitq;
2291 u32 dnode, dport = 0;
2293 struct tipc_sock *tsk;
2295 struct sk_buff *skb;
2297 __skb_queue_head_init(&xmitq);
2298 while (skb_queue_len(inputq)) {
2299 dport = tipc_skb_peek_port(inputq, dport);
2300 tsk = tipc_sk_lookup(net, dport);
2304 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2305 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2306 spin_unlock_bh(&sk->sk_lock.slock);
2308 /* Send pending response/rejected messages, if any */
2309 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2313 /* No destination socket => dequeue skb if still there */
2314 skb = tipc_skb_dequeue(inputq, dport);
2318 /* Try secondary lookup if unresolved named message */
2319 err = TIPC_ERR_NO_PORT;
2320 if (tipc_msg_lookup_dest(net, skb, &err))
2323 /* Prepare for message rejection */
2324 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2327 trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
2329 dnode = msg_destnode(buf_msg(skb));
2330 tipc_node_xmit_skb(net, skb, dnode, dport);
2334 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2336 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2337 struct sock *sk = sock->sk;
2341 int err = sock_error(sk);
2346 if (signal_pending(current))
2347 return sock_intr_errno(*timeo_p);
2349 add_wait_queue(sk_sleep(sk), &wait);
2350 done = sk_wait_event(sk, timeo_p,
2351 sk->sk_state != TIPC_CONNECTING, &wait);
2352 remove_wait_queue(sk_sleep(sk), &wait);
2357 static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2359 if (addr->family != AF_TIPC)
2361 if (addr->addrtype == TIPC_SERVICE_RANGE)
2362 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2363 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2364 addr->addrtype == TIPC_SOCKET_ADDR);
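/* Hypothetical userspace example of an address accepted by the sanity
 * check above: a service range with lower <= upper. The same structure
 * with .lower = 20, .upper = 10 would be rejected.
 *
 *	struct sockaddr_tipc sa = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.addr.nameseq = { .type = 1000, .lower = 10, .upper = 20 },
 *	};
 */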
2368 * tipc_connect - establish a connection to another TIPC port
2369 * @sock: socket structure
2370 * @dest: socket address for destination port
2371 * @destlen: size of socket address data structure
2372 * @flags: file-related flags associated with socket
2374 * Returns 0 on success, errno otherwise
2376 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2377 int destlen, int flags)
2379 struct sock *sk = sock->sk;
2380 struct tipc_sock *tsk = tipc_sk(sk);
2381 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2382 struct msghdr m = {NULL,};
2383 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2387 if (destlen != sizeof(struct sockaddr_tipc))
2397 if (dst->family == AF_UNSPEC) {
2398 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2399 if (!tipc_sk_type_connectionless(sk))
2403 if (!tipc_sockaddr_is_sane(dst)) {
2407 /* DGRAM/RDM connect(), just save the destaddr */
2408 if (tipc_sk_type_connectionless(sk)) {
2409 memcpy(&tsk->peer, dest, destlen);
2411 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2416 previous = sk->sk_state;
2418 switch (sk->sk_state) {
2420 /* Send a 'SYN-' to destination */
2422 m.msg_namelen = destlen;
2424 /* If connect() is non-blocking, set MSG_DONTWAIT so that
2425 * the send operation never blocks.
2428 m.msg_flags = MSG_DONTWAIT;
2430 res = __tipc_sendmsg(sock, &m, 0);
2431 if ((res < 0) && (res != -EWOULDBLOCK))
2434 /* Just entered TIPC_CONNECTING state; the only
2435 * difference is that the return value in the non-blocking
2436 * case is -EINPROGRESS rather than -EALREADY.
2440 case TIPC_CONNECTING:
2442 if (previous == TIPC_CONNECTING)
2446 timeout = msecs_to_jiffies(timeout);
2447 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2448 res = tipc_wait_for_connect(sock, &timeout);
2450 case TIPC_ESTABLISHED:
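/* Hypothetical userspace sketch of the semantics implemented above
 * (assumes <sys/socket.h> and <linux/tipc.h>; service type 1000 is
 * illustrative): a SOCK_SEQPACKET connect() performs the full handshake,
 * while on a SOCK_RDM socket connect() merely pins a default
 * destination, which a later AF_UNSPEC connect() dissolves again.
 *
 *	struct sockaddr_tipc srv = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_ADDR,
 *		.addr.name.name = { .type = 1000, .instance = 1 },
 *	};
 *	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *	int rd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 *	connect(sd, (struct sockaddr *)&srv, sizeof(srv));
 *	connect(rd, (struct sockaddr *)&srv, sizeof(srv));
 *	srv.family = AF_UNSPEC;
 *	connect(rd, (struct sockaddr *)&srv, sizeof(srv));
 */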
2463 * tipc_listen - allow socket to listen for incoming connections
2464 * @sock: socket structure
2467 * Returns 0 on success, errno otherwise
2469 static int tipc_listen(struct socket *sock, int len)
2471 struct sock *sk = sock->sk;
2475 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2481 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2483 struct sock *sk = sock->sk;
2487 /* True wake-one mechanism for incoming connections: only
2488 * one process gets woken up, not the 'whole herd'.
2489 * Since we do not 'race & poll' for established sockets
2490 * anymore, the common case will execute the loop only once.
2493 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2494 TASK_INTERRUPTIBLE);
2495 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2497 timeo = schedule_timeout(timeo);
2501 if (!skb_queue_empty(&sk->sk_receive_queue))
2506 err = sock_intr_errno(timeo);
2507 if (signal_pending(current))
2510 finish_wait(sk_sleep(sk), &wait);
2515 * tipc_accept - wait for connection request
2516 * @sock: listening socket
2517 * @newsock: new socket that is to be connected
2518 * @flags: file-related flags associated with socket
2520 * Returns 0 on success, errno otherwise
2522 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2525 struct sock *new_sk, *sk = sock->sk;
2526 struct sk_buff *buf;
2527 struct tipc_sock *new_tsock;
2528 struct tipc_msg *msg;
2534 if (sk->sk_state != TIPC_LISTEN) {
2538 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2539 res = tipc_wait_for_accept(sock, timeo);
2543 buf = skb_peek(&sk->sk_receive_queue);
2545 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2548 security_sk_clone(sock->sk, new_sock->sk);
2550 new_sk = new_sock->sk;
2551 new_tsock = tipc_sk(new_sk);
2554 /* we lock on new_sk; but lockdep sees the lock on sk */
2555 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2558 * Reject any stray messages received by new socket
2559 * before the socket lock was taken (very, very unlikely)
2561 tsk_rej_rx_queue(new_sk);
2563 /* Connect new socket to its peer */
2564 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2566 tsk_set_importance(new_tsock, msg_importance(msg));
2567 if (msg_named(msg)) {
2568 new_tsock->conn_type = msg_nametype(msg);
2569 new_tsock->conn_instance = msg_nameinst(msg);
2573 * Respond to 'SYN-' by discarding it and returning 'ACK-'.
2574 * Respond to 'SYN+' by queuing it on the new socket.
2576 if (!msg_data_sz(msg)) {
2577 struct msghdr m = {NULL,};
2579 tsk_advance_rx_queue(sk);
2580 __tipc_sendstream(new_sock, &m, 0);
2582 __skb_dequeue(&sk->sk_receive_queue);
2583 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2584 skb_set_owner_r(buf, new_sk);
2586 release_sock(new_sk);
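/* Hypothetical userspace counterpart of the listen/accept path above
 * (error handling elided, service type 1000 illustrative):
 *
 *	struct sockaddr_tipc srv = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_ADDR,
 *		.addr.name.name = { .type = 1000, .instance = 1 },
 *	};
 *	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *
 *	bind(sd, (struct sockaddr *)&srv, sizeof(srv));
 *	listen(sd, 5);
 *	int peer = accept(sd, NULL, NULL);
 */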
2593 * tipc_shutdown - shutdown socket connection
2594 * @sock: socket structure
2595 * @how: direction to close (must be SHUT_RDWR)
2597 * Terminates connection (if necessary), then purges socket's receive queue.
2599 * Returns 0 on success, errno otherwise
2601 static int tipc_shutdown(struct socket *sock, int how)
2603 struct sock *sk = sock->sk;
2606 if (how != SHUT_RDWR)
2611 trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2612 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2613 sk->sk_shutdown = SEND_SHUTDOWN;
2615 if (sk->sk_state == TIPC_DISCONNECTING) {
2616 /* Discard any unreceived messages */
2617 __skb_queue_purge(&sk->sk_receive_queue);
2619 /* Wake up anyone sleeping in poll */
2620 sk->sk_state_change(sk);
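/* Unlike TCP, only a full shutdown is accepted here; a hypothetical
 * userspace sketch:
 *
 *	shutdown(sd, SHUT_RDWR);    - succeeds
 *	shutdown(sd, SHUT_WR);      - rejected (how != SHUT_RDWR)
 */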
2630 static void tipc_sk_check_probing_state(struct sock *sk,
2631 struct sk_buff_head *list)
2633 struct tipc_sock *tsk = tipc_sk(sk);
2634 u32 pnode = tsk_peer_node(tsk);
2635 u32 pport = tsk_peer_port(tsk);
2636 u32 self = tsk_own_node(tsk);
2637 u32 oport = tsk->portid;
2638 struct sk_buff *skb;
2640 if (tsk->probe_unacked) {
2641 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2642 sk->sk_err = ECONNABORTED;
2643 tipc_node_remove_conn(sock_net(sk), pnode, pport);
2644 sk->sk_state_change(sk);
2647 /* Prepare new probe */
2648 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2649 pnode, self, pport, oport, TIPC_OK);
2651 __skb_queue_tail(list, skb);
2652 tsk->probe_unacked = true;
2653 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2656 static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2658 struct tipc_sock *tsk = tipc_sk(sk);
2660 /* Try again later if dest link is congested */
2661 if (tsk->cong_link_cnt) {
2662 sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
2665 /* Prepare SYN for retransmit */
2666 tipc_msg_skb_clone(&sk->sk_write_queue, list);
2669 static void tipc_sk_timeout(struct timer_list *t)
2671 struct sock *sk = from_timer(sk, t, sk_timer);
2672 struct tipc_sock *tsk = tipc_sk(sk);
2673 u32 pnode = tsk_peer_node(tsk);
2674 struct sk_buff_head list;
2677 skb_queue_head_init(&list);
2680 /* Try again later if socket is busy */
2681 if (sock_owned_by_user(sk)) {
2682 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2687 if (sk->sk_state == TIPC_ESTABLISHED)
2688 tipc_sk_check_probing_state(sk, &list);
2689 else if (sk->sk_state == TIPC_CONNECTING)
2690 tipc_sk_retry_connect(sk, &list);
2694 if (!skb_queue_empty(&list))
2695 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2697 /* SYN messages may cause link congestion */
2698 if (rc == -ELINKCONG) {
2699 tipc_dest_push(&tsk->cong_links, pnode, 0);
2700 tsk->cong_link_cnt = 1;
2705 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2706 struct tipc_name_seq const *seq)
2708 struct sock *sk = &tsk->sk;
2709 struct net *net = sock_net(sk);
2710 struct publication *publ;
2713 if (scope != TIPC_NODE_SCOPE)
2714 scope = TIPC_CLUSTER_SCOPE;
2716 if (tipc_sk_connected(sk))
2718 key = tsk->portid + tsk->pub_count + 1;
2719 if (key == tsk->portid)
2722 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2723 scope, tsk->portid, key);
2724 if (unlikely(!publ))
2727 list_add(&publ->binding_sock, &tsk->publications);
2733 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2734 struct tipc_name_seq const *seq)
2736 struct net *net = sock_net(&tsk->sk);
2737 struct publication *publ;
2738 struct publication *safe;
2741 if (scope != TIPC_NODE_SCOPE)
2742 scope = TIPC_CLUSTER_SCOPE;
2744 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
2746 if (publ->scope != scope)
2748 if (publ->type != seq->type)
2750 if (publ->lower != seq->lower)
2752 if (publ->upper != seq->upper)
2754 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2755 publ->upper, publ->key);
2759 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2760 publ->upper, publ->key);
2763 if (list_empty(&tsk->publications))
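/* Hypothetical userspace view of publish/withdraw: bind() with a
 * positive scope publishes the range, bind() with the negated scope
 * withdraws it again (type/range values illustrative):
 *
 *	struct sockaddr_tipc sa = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 10, .upper = 20 },
 *	};
 *
 *	bind(sd, (struct sockaddr *)&sa, sizeof(sa));
 *	sa.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&sa, sizeof(sa));
 */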
2768 /* tipc_sk_reinit: set non-zero address in all existing sockets
2769 * when we go from standalone to network mode.
2771 void tipc_sk_reinit(struct net *net)
2773 struct tipc_net *tn = net_generic(net, tipc_net_id);
2774 struct rhashtable_iter iter;
2775 struct tipc_sock *tsk;
2776 struct tipc_msg *msg;
2778 rhashtable_walk_enter(&tn->sk_rht, &iter);
2781 rhashtable_walk_start(&iter);
2783 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2784 sock_hold(&tsk->sk);
2785 rhashtable_walk_stop(&iter);
2786 lock_sock(&tsk->sk);
2788 msg_set_prevnode(msg, tipc_own_addr(net));
2789 msg_set_orignode(msg, tipc_own_addr(net));
2790 release_sock(&tsk->sk);
2791 rhashtable_walk_start(&iter);
2795 rhashtable_walk_stop(&iter);
2796 } while (tsk == ERR_PTR(-EAGAIN));
2798 rhashtable_walk_exit(&iter);
2801 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2803 struct tipc_net *tn = net_generic(net, tipc_net_id);
2804 struct tipc_sock *tsk;
2807 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2809 sock_hold(&tsk->sk);
2815 static int tipc_sk_insert(struct tipc_sock *tsk)
2817 struct sock *sk = &tsk->sk;
2818 struct net *net = sock_net(sk);
2819 struct tipc_net *tn = net_generic(net, tipc_net_id);
2820 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2821 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
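	/* Probe linearly from a random start within
	 * [TIPC_MIN_PORT, TIPC_MAX_PORT], wrapping around on overflow,
	 * until a free port id is found or the whole range is exhausted.
	 */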
2823 while (remaining--) {
2825 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2826 portid = TIPC_MIN_PORT;
2827 tsk->portid = portid;
2828 sock_hold(&tsk->sk);
2829 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2838 static void tipc_sk_remove(struct tipc_sock *tsk)
2840 struct sock *sk = &tsk->sk;
2841 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2843 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2844 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2849 static const struct rhashtable_params tsk_rht_params = {
2851 .head_offset = offsetof(struct tipc_sock, node),
2852 .key_offset = offsetof(struct tipc_sock, portid),
2853 .key_len = sizeof(u32), /* portid */
2854 .max_size = 1048576,
2856 .automatic_shrinking = true,
2859 int tipc_sk_rht_init(struct net *net)
2861 struct tipc_net *tn = net_generic(net, tipc_net_id);
2863 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2866 void tipc_sk_rht_destroy(struct net *net)
2868 struct tipc_net *tn = net_generic(net, tipc_net_id);
2870 /* Wait for socket readers to complete */
2873 rhashtable_destroy(&tn->sk_rht);
2876 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2878 struct net *net = sock_net(&tsk->sk);
2879 struct tipc_group *grp = tsk->group;
2880 struct tipc_msg *hdr = &tsk->phdr;
2881 struct tipc_name_seq seq;
2884 if (mreq->type < TIPC_RESERVED_TYPES)
2886 if (mreq->scope > TIPC_NODE_SCOPE)
2890 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
2894 msg_set_lookup_scope(hdr, mreq->scope);
2895 msg_set_nametype(hdr, mreq->type);
2896 msg_set_dest_droppable(hdr, true);
2897 seq.type = mreq->type;
2898 seq.lower = mreq->instance;
2899 seq.upper = seq.lower;
2900 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
2901 rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2903 tipc_group_delete(net, grp);
2907 /* Eliminate any risk that a broadcast overtakes sent JOINs */
2908 tsk->mc_method.rcast = true;
2909 tsk->mc_method.mandatory = true;
2910 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
2914 static int tipc_sk_leave(struct tipc_sock *tsk)
2916 struct net *net = sock_net(&tsk->sk);
2917 struct tipc_group *grp = tsk->group;
2918 struct tipc_name_seq seq;
2923 tipc_group_self(grp, &seq, &scope);
2924 tipc_group_delete(net, grp);
2926 tipc_sk_withdraw(tsk, scope, &seq);
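/* Hypothetical userspace sketch of joining and leaving a communication
 * group; the setsockopt() handler below routes these options to
 * tipc_sk_join()/tipc_sk_leave() above (group type/instance illustrative):
 *
 *	struct tipc_group_req mreq = {
 *		.type = 1000,
 *		.instance = 1,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *	};
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &mreq, sizeof(mreq));
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_LEAVE, NULL, 0);
 */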
2931 * tipc_setsockopt - set socket option
2932 * @sock: socket structure
2933 * @lvl: option level
2934 * @opt: option identifier
2935 * @ov: pointer to new option value
2936 * @ol: length of option value
2938 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2939 * (to ease compatibility).
2941 * Returns 0 on success, errno otherwise
2943 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2944 char __user *ov, unsigned int ol)
2946 struct sock *sk = sock->sk;
2947 struct tipc_sock *tsk = tipc_sk(sk);
2948 struct tipc_group_req mreq;
2952 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2954 if (lvl != SOL_TIPC)
2955 return -ENOPROTOOPT;
2958 case TIPC_IMPORTANCE:
2959 case TIPC_SRC_DROPPABLE:
2960 case TIPC_DEST_DROPPABLE:
2961 case TIPC_CONN_TIMEOUT:
2962 if (ol < sizeof(value))
2964 if (get_user(value, (u32 __user *)ov))
2967 case TIPC_GROUP_JOIN:
2968 if (ol < sizeof(mreq))
2970 if (copy_from_user(&mreq, ov, sizeof(mreq)))
2981 case TIPC_IMPORTANCE:
2982 res = tsk_set_importance(tsk, value);
2984 case TIPC_SRC_DROPPABLE:
2985 if (sock->type != SOCK_STREAM)
2986 tsk_set_unreliable(tsk, value);
2990 case TIPC_DEST_DROPPABLE:
2991 tsk_set_unreturnable(tsk, value);
2993 case TIPC_CONN_TIMEOUT:
2994 tipc_sk(sk)->conn_timeout = value;
2996 case TIPC_MCAST_BROADCAST:
2997 tsk->mc_method.rcast = false;
2998 tsk->mc_method.mandatory = true;
3000 case TIPC_MCAST_REPLICAST:
3001 tsk->mc_method.rcast = true;
3002 tsk->mc_method.mandatory = true;
3004 case TIPC_GROUP_JOIN:
3005 res = tipc_sk_join(tsk, &mreq);
3007 case TIPC_GROUP_LEAVE:
3008 res = tipc_sk_leave(tsk);
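/* Hypothetical sketch of the fixed-size u32 options handled above,
 * e.g. raising message importance and shortening the connect timeout
 * (values illustrative, timeout in milliseconds):
 *
 *	u32 imp = TIPC_HIGH_IMPORTANCE;
 *	u32 tmo = 3000;
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */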
3020 * tipc_getsockopt - get socket option
3021 * @sock: socket structure
3022 * @lvl: option level
3023 * @opt: option identifier
3024 * @ov: receptacle for option value
3025 * @ol: receptacle for length of option value
3027 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
3028 * (to ease compatibility).
3030 * Returns 0 on success, errno otherwise
3032 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
3033 char __user *ov, int __user *ol)
3035 struct sock *sk = sock->sk;
3036 struct tipc_sock *tsk = tipc_sk(sk);
3037 struct tipc_name_seq seq;
3042 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3043 return put_user(0, ol);
3044 if (lvl != SOL_TIPC)
3045 return -ENOPROTOOPT;
3046 res = get_user(len, ol);
3053 case TIPC_IMPORTANCE:
3054 value = tsk_importance(tsk);
3056 case TIPC_SRC_DROPPABLE:
3057 value = tsk_unreliable(tsk);
3059 case TIPC_DEST_DROPPABLE:
3060 value = tsk_unreturnable(tsk);
3062 case TIPC_CONN_TIMEOUT:
3063 value = tsk->conn_timeout;
3064 /* no need to set "res", since already 0 at this point */
3066 case TIPC_NODE_RECVQ_DEPTH:
3067 value = 0; /* was tipc_queue_size, now obsolete */
3069 case TIPC_SOCK_RECVQ_DEPTH:
3070 value = skb_queue_len(&sk->sk_receive_queue);
3072 case TIPC_SOCK_RECVQ_USED:
3073 value = sk_rmem_alloc_get(sk);
3075 case TIPC_GROUP_JOIN:
3078 tipc_group_self(tsk->group, &seq, &scope);
3088 return res; /* "get" failed */
3090 if (len < sizeof(value))
3093 if (copy_to_user(ov, &value, sizeof(value)))
3096 return put_user(sizeof(value), ol);
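/* Hypothetical userspace sketch of reading one of the values above:
 *
 *	u32 used;
 *	socklen_t len = sizeof(used);
 *
 *	getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_USED, &used, &len);
 */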
3099 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3101 struct net *net = sock_net(sock->sk);
3102 struct tipc_sioc_nodeid_req nr = {0};
3103 struct tipc_sioc_ln_req lnr;
3104 void __user *argp = (void __user *)arg;
3107 case SIOCGETLINKNAME:
3108 if (copy_from_user(&lnr, argp, sizeof(lnr)))
3110 if (!tipc_node_get_linkname(net,
3111 lnr.bearer_id & 0xffff, lnr.peer,
3112 lnr.linkname, TIPC_MAX_LINK_NAME)) {
3113 if (copy_to_user(argp, &lnr, sizeof(lnr)))
3117 return -EADDRNOTAVAIL;
3119 if (copy_from_user(&nr, argp, sizeof(nr)))
3121 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3122 return -EADDRNOTAVAIL;
3123 if (copy_to_user(argp, &nr, sizeof(nr)))
3127 return -ENOIOCTLCMD;
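/* Hypothetical userspace use of SIOCGETLINKNAME as handled above
 * (peer_addr and the bearer id are illustrative):
 *
 *	struct tipc_sioc_ln_req lnr = { .peer = peer_addr, .bearer_id = 0 };
 *
 *	if (!ioctl(sd, SIOCGETLINKNAME, &lnr))
 *		printf("link: %s\n", lnr.linkname);
 */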
3131 static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3133 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3134 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3135 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3137 tsk1->peer.family = AF_TIPC;
3138 tsk1->peer.addrtype = TIPC_ADDR_ID;
3139 tsk1->peer.scope = TIPC_NODE_SCOPE;
3140 tsk1->peer.addr.id.ref = tsk2->portid;
3141 tsk1->peer.addr.id.node = onode;
3142 tsk2->peer.family = AF_TIPC;
3143 tsk2->peer.addrtype = TIPC_ADDR_ID;
3144 tsk2->peer.scope = TIPC_NODE_SCOPE;
3145 tsk2->peer.addr.id.ref = tsk1->portid;
3146 tsk2->peer.addr.id.node = onode;
3148 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3149 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
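/* socketpair() thus yields two TIPC sockets on the own node that are
 * already connected to each other, e.g. (hypothetical sketch):
 *
 *	int sv[2];
 *
 *	socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv);
 */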
3153 /* Protocol switches for the various types of TIPC sockets */
3155 static const struct proto_ops msg_ops = {
3156 .owner = THIS_MODULE,
3158 .release = tipc_release,
3160 .connect = tipc_connect,
3161 .socketpair = tipc_socketpair,
3162 .accept = sock_no_accept,
3163 .getname = tipc_getname,
3165 .ioctl = tipc_ioctl,
3166 .listen = sock_no_listen,
3167 .shutdown = tipc_shutdown,
3168 .setsockopt = tipc_setsockopt,
3169 .getsockopt = tipc_getsockopt,
3170 .sendmsg = tipc_sendmsg,
3171 .recvmsg = tipc_recvmsg,
3172 .mmap = sock_no_mmap,
3173 .sendpage = sock_no_sendpage
3176 static const struct proto_ops packet_ops = {
3177 .owner = THIS_MODULE,
3179 .release = tipc_release,
3181 .connect = tipc_connect,
3182 .socketpair = tipc_socketpair,
3183 .accept = tipc_accept,
3184 .getname = tipc_getname,
3186 .ioctl = tipc_ioctl,
3187 .listen = tipc_listen,
3188 .shutdown = tipc_shutdown,
3189 .setsockopt = tipc_setsockopt,
3190 .getsockopt = tipc_getsockopt,
3191 .sendmsg = tipc_send_packet,
3192 .recvmsg = tipc_recvmsg,
3193 .mmap = sock_no_mmap,
3194 .sendpage = sock_no_sendpage
3197 static const struct proto_ops stream_ops = {
3198 .owner = THIS_MODULE,
3200 .release = tipc_release,
3202 .connect = tipc_connect,
3203 .socketpair = tipc_socketpair,
3204 .accept = tipc_accept,
3205 .getname = tipc_getname,
3207 .ioctl = tipc_ioctl,
3208 .listen = tipc_listen,
3209 .shutdown = tipc_shutdown,
3210 .setsockopt = tipc_setsockopt,
3211 .getsockopt = tipc_getsockopt,
3212 .sendmsg = tipc_sendstream,
3213 .recvmsg = tipc_recvstream,
3214 .mmap = sock_no_mmap,
3215 .sendpage = sock_no_sendpage
3218 static const struct net_proto_family tipc_family_ops = {
3219 .owner = THIS_MODULE,
3221 .create = tipc_sk_create
3224 static struct proto tipc_proto = {
3226 .owner = THIS_MODULE,
3227 .obj_size = sizeof(struct tipc_sock),
3228 .sysctl_rmem = sysctl_tipc_rmem
3232 * tipc_socket_init - initialize TIPC socket interface
3234 * Returns 0 on success, errno otherwise
3236 int tipc_socket_init(void)
3240 res = proto_register(&tipc_proto, 1);
3242 pr_err("Failed to register TIPC protocol type\n");
3246 res = sock_register(&tipc_family_ops);
3248 pr_err("Failed to register TIPC socket type\n");
3249 proto_unregister(&tipc_proto);
3257 * tipc_socket_stop - stop TIPC socket interface
3259 void tipc_socket_stop(void)
3261 sock_unregister(tipc_family_ops.family);
3262 proto_unregister(&tipc_proto);
3265 /* Caller should hold socket lock for the passed tipc socket. */
3266 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3270 struct nlattr *nest;
3272 peer_node = tsk_peer_node(tsk);
3273 peer_port = tsk_peer_port(tsk);
3275 nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
3279 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3281 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3284 if (tsk->conn_type != 0) {
3285 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3287 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3289 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3292 nla_nest_end(skb, nest);
3297 nla_nest_cancel(skb, nest);
3302 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3305 struct net *net = sock_net(skb->sk);
3306 struct sock *sk = &tsk->sk;
3308 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3309 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3312 if (tipc_sk_connected(sk)) {
3313 if (__tipc_nl_add_sk_con(skb, tsk))
3315 } else if (!list_empty(&tsk->publications)) {
3316 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3322 /* Caller should hold socket lock for the passed tipc socket. */
3323 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3324 struct tipc_sock *tsk)
3326 struct nlattr *attrs;
3329 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3330 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3334 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3336 goto genlmsg_cancel;
3338 if (__tipc_nl_add_sk_info(skb, tsk))
3339 goto attr_msg_cancel;
3341 nla_nest_end(skb, attrs);
3342 genlmsg_end(skb, hdr);
3347 nla_nest_cancel(skb, attrs);
3349 genlmsg_cancel(skb, hdr);
3354 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3355 int (*skb_handler)(struct sk_buff *skb,
3356 struct netlink_callback *cb,
3357 struct tipc_sock *tsk))
3359 struct rhashtable_iter *iter = (void *)cb->args[4];
3360 struct tipc_sock *tsk;
3363 rhashtable_walk_start(iter);
3364 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3367 if (err == -EAGAIN) {
3374 sock_hold(&tsk->sk);
3375 rhashtable_walk_stop(iter);
3376 lock_sock(&tsk->sk);
3377 err = skb_handler(skb, cb, tsk);
3379 release_sock(&tsk->sk);
3383 release_sock(&tsk->sk);
3384 rhashtable_walk_start(iter);
3387 rhashtable_walk_stop(iter);
3391 EXPORT_SYMBOL(tipc_nl_sk_walk);
3393 int tipc_dump_start(struct netlink_callback *cb)
3395 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3397 EXPORT_SYMBOL(tipc_dump_start);
3399 int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3401 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3402 struct rhashtable_iter *iter = (void *)cb->args[4];
3403 struct tipc_net *tn = tipc_net(net);
3406 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3410 cb->args[4] = (long)iter;
3413 rhashtable_walk_enter(&tn->sk_rht, iter);
3417 int tipc_dump_done(struct netlink_callback *cb)
3419 struct rhashtable_iter *hti = (void *)cb->args[4];
3421 rhashtable_walk_exit(hti);
3425 EXPORT_SYMBOL(tipc_dump_done);
3427 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3428 struct tipc_sock *tsk, u32 sk_filter_state,
3429 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3431 struct sock *sk = &tsk->sk;
3432 struct nlattr *attrs;
3433 struct nlattr *stat;
3435 /* Filter response w.r.t. sk_state */
3436 if (!(sk_filter_state & (1 << sk->sk_state)))
3439 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3443 if (__tipc_nl_add_sk_info(skb, tsk))
3444 goto attr_msg_cancel;
3446 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3447 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3448 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3449 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3450 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3452 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3453 tipc_diag_gen_cookie(sk),
3455 goto attr_msg_cancel;
3457 stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
3459 goto attr_msg_cancel;
3461 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3462 skb_queue_len(&sk->sk_receive_queue)) ||
3463 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3464 skb_queue_len(&sk->sk_write_queue)) ||
3465 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3466 atomic_read(&sk->sk_drops)))
3467 goto stat_msg_cancel;
3469 if (tsk->cong_link_cnt &&
3470 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3471 goto stat_msg_cancel;
3473 if (tsk_conn_cong(tsk) &&
3474 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3475 goto stat_msg_cancel;
3477 nla_nest_end(skb, stat);
3480 if (tipc_group_fill_sock_diag(tsk->group, skb))
3481 goto stat_msg_cancel;
3483 nla_nest_end(skb, attrs);
3488 nla_nest_cancel(skb, stat);
3490 nla_nest_cancel(skb, attrs);
3494 EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3496 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3498 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3501 /* Caller should hold socket lock for the passed tipc socket. */
3502 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3503 struct netlink_callback *cb,
3504 struct publication *publ)
3507 struct nlattr *attrs;
3509 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3510 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3514 attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
3516 goto genlmsg_cancel;
3518 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3519 goto attr_msg_cancel;
3520 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3521 goto attr_msg_cancel;
3522 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3523 goto attr_msg_cancel;
3524 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3525 goto attr_msg_cancel;
3527 nla_nest_end(skb, attrs);
3528 genlmsg_end(skb, hdr);
3533 nla_nest_cancel(skb, attrs);
3535 genlmsg_cancel(skb, hdr);
3540 /* Caller should hold socket lock for the passed tipc socket. */
3541 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3542 struct netlink_callback *cb,
3543 struct tipc_sock *tsk, u32 *last_publ)
3546 struct publication *p;
3549 list_for_each_entry(p, &tsk->publications, binding_sock) {
3550 if (p->key == *last_publ)
3553 if (p->key != *last_publ) {
3554 /* We never set seq or call nl_dump_check_consistent(),
3555 * which means that setting prev_seq here would cause the
3556 * consistency check to fail in the netlink callback
3557 * handler, resulting in the last NLMSG_DONE message
3558 * having the NLM_F_DUMP_INTR flag set.
3565 p = list_first_entry(&tsk->publications, struct publication,
3569 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3570 err = __tipc_nl_add_sk_publ(skb, cb, p);
3572 *last_publ = p->key;
3581 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3584 u32 tsk_portid = cb->args[0];
3585 u32 last_publ = cb->args[1];
3586 u32 done = cb->args[2];
3587 struct net *net = sock_net(skb->sk);
3588 struct tipc_sock *tsk;
3591 struct nlattr **attrs;
3592 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3594 err = tipc_nlmsg_parse(cb->nlh, &attrs);
3598 if (!attrs[TIPC_NLA_SOCK])
3601 err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
3602 attrs[TIPC_NLA_SOCK],
3603 tipc_nl_sock_policy, NULL);
3607 if (!sock[TIPC_NLA_SOCK_REF])
3610 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3616 tsk = tipc_sk_lookup(net, tsk_portid);
3620 lock_sock(&tsk->sk);
3621 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3624 release_sock(&tsk->sk);
3627 cb->args[0] = tsk_portid;
3628 cb->args[1] = last_publ;
3635 * tipc_sk_filtering - check if a socket should be traced
3636 * @sk: the socket to be examined
3637 * @sysctl_tipc_sk_filter[]: the socket tuple for filtering,
3638 * (portid, sock type, name type, name lower, name upper)
3640 * Returns true if the socket meets the socket tuple data
3641 * (value 0 = 'any') or when there is no tuple set (all = 0);
3642 * otherwise false
3644 bool tipc_sk_filtering(struct sock *sk)
3646 struct tipc_sock *tsk;
3647 struct publication *p;
3648 u32 _port, _sktype, _type, _lower, _upper;
3649 u32 type = 0, lower = 0, upper = 0;
3656 _port = sysctl_tipc_sk_filter[0];
3657 _sktype = sysctl_tipc_sk_filter[1];
3658 _type = sysctl_tipc_sk_filter[2];
3659 _lower = sysctl_tipc_sk_filter[3];
3660 _upper = sysctl_tipc_sk_filter[4];
3662 if (!_port && !_sktype && !_type && !_lower && !_upper)
3666 return (_port == tsk->portid);
3668 if (_sktype && _sktype != sk->sk_type)
3671 if (tsk->published) {
3672 p = list_first_entry_or_null(&tsk->publications,
3673 struct publication, binding_sock);
3681 if (!tipc_sk_type_connectionless(sk)) {
3682 type = tsk->conn_type;
3683 lower = tsk->conn_instance;
3684 upper = tsk->conn_instance;
3687 if ((_type && _type != type) || (_lower && _lower != lower) ||
3688 (_upper && _upper != upper))
3694 u32 tipc_sock_get_portid(struct sock *sk)
3696 return (sk) ? (tipc_sk(sk))->portid : 0;
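/* Illustrative tuple settings for the filtering logic above (all-zero
 * means trace every socket; any single zero field acts as a wildcard):
 *
 *	{0, 0, 0, 0, 0}               - match all sockets
 *	{42, 0, 0, 0, 0}              - match only port id 42
 *	{0, SOCK_RDM, 1000, 10, 20}   - RDM sockets bound to 1000:10-20
 */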
3700 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded;
3701 * both the rcv and backlog queues are considered
3702 * @sk: tipc sk to be checked
3703 * @skb: tipc msg to be checked
3705 * Returns true if the socket rx queue allocation exceeds 90% of its limit, otherwise false
3708 bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3710 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3711 unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3712 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3714 return (qsize > lim * 90 / 100);
3718 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded;
3719 * only the rcv queue is considered
3720 * @sk: tipc sk to be checked
3721 * @skb: tipc msg to be checked
3723 * Returns true if the socket rx queue allocation exceeds 90% of its limit, otherwise false
3726 bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3728 unsigned int lim = rcvbuf_limit(sk, skb);
3729 unsigned int qsize = sk_rmem_alloc_get(sk);
3731 return (qsize > lim * 90 / 100);
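/* Worked example for both checks above (numbers illustrative): with a
 * 2 MB limit, overlimit is reported once the queue allocation exceeds
 * 2 MB * 90 / 100, i.e. roughly 1.8 MB.
 */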
3735 * tipc_sk_dump - dump TIPC socket
3736 * @sk: tipc sk to be dumped
3737 * @dqueues: bitmask selecting which socket queues, if any, to dump:
3738 * - TIPC_DUMP_NONE: don't dump socket queues
3739 * - TIPC_DUMP_SK_SNDQ: dump socket send queue
3740 * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3741 * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3742 * - TIPC_DUMP_ALL: dump all the socket queues above
3743 * @buf: returned buffer of dump data
3745 int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3748 size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3749 struct tipc_sock *tsk;
3750 struct publication *p;
3754 i += scnprintf(buf, sz, "sk data: (null)\n");
3759 tsk_connected = !tipc_sk_type_connectionless(sk);
3761 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3762 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3763 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3764 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3765 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3766 if (tsk_connected) {
3767 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3768 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3769 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
3770 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
3772 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3773 if (tsk->published) {
3774 p = list_first_entry_or_null(&tsk->publications,
3775 struct publication, binding_sock);
3776 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0);
3777 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0);
3778 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0);
3780 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3781 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3782 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3783 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3784 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3785 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3786 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3787 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3788 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3789 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3790 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3791 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3792 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3793 i += scnprintf(buf + i, sz - i, " | %d\n", sk->sk_backlog.len);
3795 if (dqueues & TIPC_DUMP_SK_SNDQ) {
3796 i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3797 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
3800 if (dqueues & TIPC_DUMP_SK_RCVQ) {
3801 i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
3802 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
3805 if (dqueues & TIPC_DUMP_SK_BKLGQ) {
3806 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
3807 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
3808 if (sk->sk_backlog.tail != sk->sk_backlog.head) {
3809 i += scnprintf(buf + i, sz - i, " tail ");
3810 i += tipc_skb_dump(sk->sk_backlog.tail, false,