/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/rhashtable.h>
#include "name_table.h"
#include "name_distr.h"
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTERVAL	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
};
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @link_cong: non-zero if owner must sleep because of link congestion
 * @sent_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head sock_list;
	struct list_head publications;
	u32 pub_count;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool probe_unacked;
	bool link_cong;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct rcu_head rcu;
};
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
static void tipc_sk_timeout(unsigned long data);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
			      size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;
/*
 * Revised TIPC socket locking policy:
 *
 * Most socket operations take the standard socket lock when they start
 * and hold it until they finish (or until they need to sleep). Acquiring
 * this lock grants the owner exclusive access to the fields of the socket
 * data structures, with the exception of the backlog queue. A few socket
 * operations can be done without taking the socket lock because they only
 * read socket information that never changes during the life of the socket.
 *
 * Socket operations may acquire the lock for the associated TIPC port if they
 * need to perform an operation on the port. If any routine needs to acquire
 * both the socket lock and the port lock it must take the socket lock first
 * to avoid the risk of deadlock.
 *
 * The dispatcher handling incoming messages cannot grab the socket lock in
 * the standard fashion, since it runs at BH level when invoked and cannot
 * block. Instead, it checks to see if the socket lock is currently owned by
 * someone, and either handles the message itself or adds it to the socket's
 * backlog queue; in the latter case the queued message is processed once the
 * process owning the socket lock releases it.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
 * the problem of a blocked socket operation preventing any other operations
 * from occurring. However, applications must be careful if they have
 * multiple threads trying to send (or receive) on the same socket, as these
 * operations might interfere with each other. For example, doing a connect
 * and a receive at the same time might allow the receive to consume the
 * ACK message meant for the connect. While additional work could be done
 * to try and overcome this, it doesn't seem to be worthwhile at present.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
 * that another operation that must be performed in a non-blocking manner is
 * not delayed for very long because the lock has already been taken.
 *
 * NOTE: This code assumes that certain fields of a port/socket pair are
 * constant over its lifetime; such fields can be examined without taking
 * the socket lock and/or port lock, and do not need to be re-read even
 * after resuming processing after waiting. These fields include:
 *   - pointer to socket sk structure (aka tipc_sock structure)
 *   - pointer to port structure
 */
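
/* Editorial sketch (not part of the original file) of the two locking
 * paths described above, mirroring tipc_sk_rcv()/tipc_sk_enqueue() below:
 *
 *	// Process context: may sleep while owning the socket state
 *	lock_sock(sk);
 *	... operate on tipc_sock fields ...
 *	release_sock(sk);	// also runs sk_backlog_rcv() on queued skbs
 *
 *	// BH context (dispatcher): must not block
 *	if (!sock_owned_by_user(sk))
 *		filter_rcv(sk, skb, &xmitq);	// handle message directly
 *	else
 *		sk_add_backlog(sk, skb, limit);	// defer to the lock owner
 */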
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tsk->phdr, (u32)imp);
	return 0;
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked >= tsk->snd_win;
}
/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}
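
/* Worked example (editorial; assumes FLOWCTL_BLK_SZ is 1024 bytes):
 * a 64 KB receive buffer yields tsk_adv_blocks(65536) = 65536 / 1024 / 4
 * = 16 advertisable blocks. The extra division by 4 absorbs the worst
 * case truesize(len)/len ratio noted above, so the advertised window can
 * never overcommit the real buffer space.
 */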
/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based flow control, incrementing the
 *   counter once per message
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
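
/* Example (editorial; same FLOWCTL_BLK_SZ assumption as above): against a
 * TIPC_BLOCK_FLOWCTL peer a 3000 byte message costs 3000 / 1024 + 1 = 3
 * window units; against a legacy peer every message costs exactly one
 * unit, whatever its size, which reproduces message based flow control.
 */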
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}
/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}
/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}
static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_socket->state == SS_CONNECTED;
}

/* tipc_sk_type_connectionless - check if the socket is datagram socket
 *
 * Returns true if connection less, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}
/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node;
	u32 peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;
	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;
	if (!orig_node && (peer_node == tn->own_addr))
		return true;
	if (!peer_node && (orig_node == tn->own_addr))
		return true;

	return false;
}
/* tipc_set_sk_state - set the sk_state of the socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_socket->state;
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_OPEN:
		res = 0;
		break;
	case TIPC_LISTEN:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldstate == SS_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldstate == SS_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	struct tipc_net *tn;
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	msg = &tsk->phdr;
	tn = net_generic(sock_net(sk), tipc_net_id);
	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
		      NAMED_H_SIZE, 0);

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}
	msg_set_origport(msg, tsk->portid);
	setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}

	return 0;
}
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}
/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		if (!tipc_sk_type_connectionless(sk)) {
			tipc_node_remove_conn(net, dnode, tsk->portid);
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		}
	}
}
/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);

	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}
/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 *       access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}

	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope > 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
 *       a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int *uaddr_len, int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((sock->state != SS_CONNECTED) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tn->own_addr;
	}

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return 0;
}
/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 *
 * Returns pollmask value
 *
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits. TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static unsigned int tipc_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	switch ((int)sock->state) {
	case SS_CONNECTED:
		if (!tsk->link_cong && !tsk_conn_cong(tsk))
			mask |= POLLOUT;
		/* fall thru' */
	case SS_CONNECTING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
		break;
	default:
		switch (sk->sk_state) {
		case TIPC_OPEN:
			if (!tsk->link_cong)
				mask |= POLLOUT;
			if (tipc_sk_type_connectionless(sk) &&
			    (!skb_queue_empty(&sk->sk_receive_queue)))
				mask |= (POLLIN | POLLRDNORM);
			break;
		case TIPC_DISCONNECTING:
			mask = (POLLIN | POLLRDNORM | POLLHUP);
			break;
		case TIPC_LISTEN:
			if (!skb_queue_empty(&sk->sk_receive_queue))
				mask |= (POLLIN | POLLRDNORM);
			break;
		}
	}

	return mask;
}
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dsz: total length of message data
 * @timeo: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dsz, long timeo)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	struct sk_buff_head pktchain;
	struct iov_iter save = msg->msg_iter;
	uint mtu;
	int rc;

	if (!timeo && tsk->link_cong)
		return -ELINKCONG;

	msg_set_type(mhdr, TIPC_MCAST_MSG);
	msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(mhdr, 0);
	msg_set_destnode(mhdr, 0);
	msg_set_nametype(mhdr, seq->type);
	msg_set_namelower(mhdr, seq->lower);
	msg_set_nameupper(mhdr, seq->upper);
	msg_set_hdr_sz(mhdr, MCAST_H_SIZE);

	skb_queue_head_init(&pktchain);

new_mtu:
	mtu = tipc_bcast_get_mtu(net);
	rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
	if (unlikely(rc < 0))
		return rc;

	do {
		rc = tipc_bcast_xmit(net, &pktchain);
		if (likely(!rc))
			return dsz;

		if (rc == -ELINKCONG) {
			tsk->link_cong = 1;
			rc = tipc_wait_for_sndmsg(sock, &timeo);
			if (!rc)
				continue;
		}
		__skb_queue_purge(&pktchain);
		if (rc == -EMSGSIZE) {
			msg->msg_iter = save;
			goto new_mtu;
		}
		break;
	} while (1);

	return rc;
}
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	struct tipc_msg *msg;
	struct tipc_plist dports;
	u32 portid;
	u32 scope = TIPC_CLUSTER_SCOPE;
	struct sk_buff_head tmpq;
	uint hsz;
	struct sk_buff *skb, *_skb;

	__skb_queue_head_init(&tmpq);
	tipc_plist_init(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		msg = buf_msg(skb);
		hsz = skb_headroom(skb) + msg_hdr_sz(msg);

		if (in_own_node(net, msg_orignode(msg)))
			scope = TIPC_NODE_SCOPE;

		/* Create destination port list and message clones: */
		tipc_nametbl_mc_translate(net,
					  msg_nametype(msg), msg_namelower(msg),
					  msg_nameupper(msg), scope, &dports);
		portid = tipc_plist_pop(&dports);
		for (; portid; portid = tipc_plist_pop(&dports)) {
			_skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
	}
	tipc_sk_rcv(net, inputq);
}
/**
 * tipc_sk_proto_rcv - receive a connection management protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 */
static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
			      struct sk_buff_head *xmitq)
{
	struct sock *sk = &tsk->sk;
	u32 onode = tsk_own_node(tsk);
	struct tipc_msg *hdr = buf_msg(skb);
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr))
		goto exit;

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}
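
/* Editorial summary of the CONN_MANAGER message types handled above:
 * CONN_PROBE is bounced back as CONN_PROBE_REPLY to show the socket is
 * alive; CONN_ACK opens the send window by msg_conn_ack() units (and, for
 * TIPC_BLOCK_FLOWCTL peers, adopts the re-advertised window), waking a
 * writer blocked on congestion; CONN_PROBE_REPLY just clears probe_unacked.
 */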
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			return -EPIPE;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}
/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	u32 dnode, dport;
	struct sk_buff_head pktchain;
	bool is_connectionless = tipc_sk_type_connectionless(sk);
	struct sk_buff *skb;
	struct tipc_name_seq *seq;
	struct iov_iter save;
	u32 mtu;
	long timeo;
	int rc;

	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;
	if (unlikely(!dest)) {
		if (is_connectionless && tsk->peer.family == AF_TIPC)
			dest = &tsk->peer;
		else
			return -EDESTADDRREQ;
	} else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
		   dest->family != AF_TIPC) {
		return -EINVAL;
	}
	if (!is_connectionless) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}
	seq = &dest->addr.nameseq;
	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	if (dest->addrtype == TIPC_ADDR_MCAST) {
		return tipc_sendmcast(sock, seq, m, dsz, timeo);
	} else if (dest->addrtype == TIPC_ADDR_NAME) {
		u32 type = dest->addr.name.name.type;
		u32 inst = dest->addr.name.name.instance;
		u32 domain = dest->addr.name.domain;

		dnode = domain;
		msg_set_type(mhdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
		msg_set_nametype(mhdr, type);
		msg_set_nameinst(mhdr, inst);
		msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(mhdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(mhdr, 0);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dest->addr.id.ref);
		msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
	}

	skb_queue_head_init(&pktchain);
	save = m->msg_iter;
new_mtu:
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);
	if (rc < 0)
		return rc;

	do {
		skb = skb_peek(&pktchain);
		TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
		rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
		if (likely(!rc)) {
			if (!is_connectionless)
				sock->state = SS_CONNECTING;
			return dsz;
		}
		if (rc == -ELINKCONG) {
			tsk->link_cong = 1;
			rc = tipc_wait_for_sndmsg(sock, &timeo);
			if (!rc)
				continue;
		}
		__skb_queue_purge(&pktchain);
		if (rc == -EMSGSIZE) {
			m->msg_iter = save;
			goto new_mtu;
		}
		break;
	} while (1);

	return rc;
}
static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (sock->state != SS_CONNECTED)
			return -ENOTCONN;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p,
				     (!tsk->link_cong &&
				      !tsk_conn_cong(tsk)) ||
				     !tipc_sk_connected(sk));
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}
/**
 * tipc_send_stream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_send_stream(sock, m, dsz);
	release_sock(sk);

	return ret;
}
static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	struct sk_buff_head pktchain;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	u32 portid = tsk->portid;
	int rc = -EINVAL;
	long timeo;
	u32 dnode;
	uint mtu, send, sent = 0;
	struct iov_iter save;
	int hlen = MIN_H_SIZE;

	/* Handle implied connection establishment */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dsz);
		hlen = msg_hdr_sz(mhdr);
		if (dsz && (dsz == rc))
			tsk->snt_unacked = tsk_inc(tsk, dsz + hlen);
		return rc;
	}
	if (dsz > (uint)INT_MAX)
		return -EMSGSIZE;

	if (unlikely(sock->state != SS_CONNECTED)) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else
			return -ENOTCONN;
	}

	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	if (!timeo && tsk->link_cong)
		return -ELINKCONG;

	dnode = tsk_peer_node(tsk);
	skb_queue_head_init(&pktchain);

next:
	save = m->msg_iter;
	mtu = tsk->max_pkt;
	send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
	rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
	if (unlikely(rc < 0))
		return rc;

	do {
		if (likely(!tsk_conn_cong(tsk))) {
			rc = tipc_node_xmit(net, &pktchain, dnode, portid);
			if (likely(!rc)) {
				tsk->snt_unacked += tsk_inc(tsk, send + hlen);
				sent += send;
				if (sent == dsz)
					break;
				goto next;
			}
			if (rc == -EMSGSIZE) {
				__skb_queue_purge(&pktchain);
				tsk->max_pkt = tipc_node_get_mtu(net, dnode,
								 portid);
				m->msg_iter = save;
				goto next;
			}
			if (rc != -ELINKCONG)
				break;

			tsk->link_cong = 1;
		}
		rc = tipc_wait_for_sndpkt(sock, &timeo);
	} while (!rc);

	__skb_queue_purge(&pktchain);
	return sent ? sent : rc;
}
/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_send_stream(sock, m, dsz);
}
/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}
/**
 * set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @msg: received message header
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);

	if (addr) {
		addr->family = AF_TIPC;
		addr->addrtype = TIPC_ADDR_ID;
		memset(&addr->addr, 0, sizeof(addr->addr));
		addr->addr.id.ref = msg_origport(msg);
		addr->addr.id.node = msg_orignode(msg);
		addr->addr.name.domain = 0;	/* could leave uninitialized */
		addr->scope = 0;		/* could leave uninitialized */
		m->msg_namelen = sizeof(struct sockaddr_tipc);
	}
}
/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}
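
/* Editorial cross-reference: the peer's tipc_sk_proto_rcv() consumes this
 * CONN_ACK by subtracting msg_conn_ack() from snt_unacked and, when
 * TIPC_BLOCK_FLOWCTL is supported, by adopting the window advertised
 * here, which may unblock a sender waiting on tsk_conn_cong().
 */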
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	*timeop = timeo;
	return err;
}
/**
 * tipc_recvmsg - receive packet-oriented message
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
			int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	bool is_connectionless = tipc_sk_type_connectionless(sk);
	long timeo;
	unsigned int sz;
	u32 err;
	int res, hlen;

	/* Catch invalid receive requests */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (!is_connectionless && unlikely(sk->sk_state == TIPC_OPEN)) {
		res = -ENOTCONN;
		goto exit;
	}

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);
	if (res)
		goto exit;

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	hlen = msg_hdr_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
	res = tipc_sk_anc_data_recv(m, msg, tsk);
	if (res)
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		res = skb_copy_datagram_msg(buf, hlen, m, sz);
		if (res)
			goto exit;
		res = sz;
	} else {
		if (is_connectionless || err == TIPC_CONN_SHUTDOWN ||
		    m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	if (unlikely(flags & MSG_PEEK))
		goto exit;

	if (likely(!is_connectionless)) {
		tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
		if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
			tipc_sk_send_ack(tsk);
	}
	tsk_advance_rx_queue(sk);
exit:
	release_sock(sk);
	return res;
}
/**
 * tipc_recv_stream - receive stream-oriented data
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available,
 * it will optionally wait for more; it never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
			    size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeo;
	unsigned int sz;
	int target;
	int sz_copied = 0;
	u32 err;
	int res = 0, hlen;

	/* Catch invalid receive attempts */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		res = -ENOTCONN;
		goto exit;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

restart:
	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);
	if (res)
		goto exit;

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	hlen = msg_hdr_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = tipc_sk_anc_data_recv(m, msg, tsk);
		if (res)
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		u32 offset = TIPC_SKB_CB(buf)->bytes_read;
		u32 needed;
		int sz_to_copy;

		sz -= offset;
		needed = (buf_len - sz_copied);
		sz_to_copy = min(sz, needed);

		res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy);
		if (res)
			goto exit;

		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->bytes_read =
					offset + sz_to_copy;
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	if (unlikely(flags & MSG_PEEK))
		goto exit;

	tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
	if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
		tipc_sk_send_ack(tsk);
	tsk_advance_rx_queue(sk);

	/* Loop around if more data is required */
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sz_copied < target)) &&	/* and more is ready or required */
	    (!err))			/* and haven't reached a FIN */
		goto restart;

exit:
	release_sock(sk);
	return sz_copied ? sz_copied : res;
}
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);
	rcu_read_unlock();
}
/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	rcu_read_unlock();
}
static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}
/**
 * filter_connect - Handle all incoming messages for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
 *
 * Returns true if everything ok, false otherwise
 */
static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct socket *sock = sk->sk_socket;
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(msg_mcast(hdr)))
		return false;

	switch ((int)sock->state) {
	case SS_CONNECTED:

		/* Accept only connection-based messages sent by peer */
		if (unlikely(!tsk_peer_msg(tsk, hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			/* Let timer expire on its own */
			tipc_node_remove_conn(net, tsk_peer_node(tsk),
					      tsk->portid);
			sk->sk_state_change(sk);
		}
		return true;

	case SS_CONNECTING:

		/* Accept only ACK or NACK message */
		if (unlikely(!msg_connected(hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			return true;
		}

		if (unlikely(!msg_isdata(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = EINVAL;
			return true;
		}

		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
		msg_set_importance(&tsk->phdr, msg_importance(hdr));
		sock->state = SS_CONNECTED;

		/* If 'ACK+' message, add to socket receive queue */
		if (msg_data_sz(hdr))
			return true;

		/* If empty 'ACK-' message, wake up sleeping connect() */
		if (waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));

		/* 'ACK-' message is neither accepted nor rejected: */
		msg_set_dest_droppable(hdr, 1);
		return false;
	default:
		switch (sk->sk_state) {
		case TIPC_OPEN:
		case TIPC_DISCONNECTING:
			break;
		case TIPC_LISTEN:
			/* Accept only SYN message */
			if (!msg_connected(hdr) && !(msg_errcode(hdr)))
				return true;
			break;
		default:
			pr_err("Unknown sk_state %u\n", sk->sk_state);
		}

		return false;
	}
}
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
 * @skb: message
 *
 * For connection oriented messages, irrespective of importance,
 * default queue limit is 2 MB.
 *
 * For connectionless messages, queue limits are based on message
 * importance as follows:
 *
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(!msg_connected(hdr)))
		return sk->sk_rcvbuf << msg_importance(hdr);

	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return sk->sk_rcvbuf;

	return FLOWCTL_MSG_LIM;
}
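
/* Worked example (editorial): a connectionless message of importance
 * TIPC_HIGH_IMPORTANCE (2) arriving at a socket with a 2 MB sk_rcvbuf may
 * queue up to 2 MB << 2 = 8 MB, matching the table above; data from a
 * connected TIPC_BLOCK_FLOWCTL peer is capped at plain sk_rcvbuf.
 */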
/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @skb: pointer to message.
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 *
 * Returns true if message was added to socket receive queue, otherwise false
 */
static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
		       struct sk_buff_head *xmitq)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	unsigned int limit = rcvbuf_limit(sk, skb);
	int err = TIPC_OK;
	int usr = msg_user(hdr);

	if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
		tipc_sk_proto_rcv(tsk, skb, xmitq);
		return false;
	}

	if (unlikely(usr == SOCK_WAKEUP)) {
		kfree_skb(skb);
		tsk->link_cong = 0;
		sk->sk_write_space(sk);
		return false;
	}

	/* Drop if illegal message type */
	if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
		kfree_skb(skb);
		return false;
	}

	/* Reject if wrong message type for current socket state */
	if (tipc_sk_type_connectionless(sk)) {
		if (msg_connected(hdr)) {
			err = TIPC_ERR_NO_PORT;
			goto reject;
		}
	} else if (unlikely(!filter_connect(tsk, skb))) {
		err = TIPC_ERR_NO_PORT;
		goto reject;
	}

	/* Reject message if there isn't room to queue it */
	if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
		err = TIPC_ERR_OVERLOAD;
		goto reject;
	}

	/* Enqueue message */
	TIPC_SKB_CB(skb)->bytes_read = 0;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);

	sk->sk_data_ready(sk);
	return true;

reject:
	if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
		__skb_queue_tail(xmitq, skb);
	return false;
}
/**
 * tipc_backlog_rcv - handle incoming message from backlog queue
 *
 * Caller must hold socket lock
 */
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	unsigned int truesize = skb->truesize;
	struct sk_buff_head xmitq;
	u32 dnode, selector;

	__skb_queue_head_init(&xmitq);

	if (likely(filter_rcv(sk, skb, &xmitq))) {
		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
		return 0;
	}

	if (skb_queue_empty(&xmitq))
		return 0;

	/* Send response/rejected message */
	skb = __skb_dequeue(&xmitq);
	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);

	return 0;
}
/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
			    u32 dport, struct sk_buff_head *xmitq)
{
	unsigned long time_limit = jiffies + 2;
	struct sk_buff *skb;
	unsigned int lim;
	atomic_t *dcnt;
	u32 onode;

	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return;

		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
			return;

		/* Add message directly to receive queue if possible */
		if (!sock_owned_by_user(sk)) {
			filter_rcv(sk, skb, xmitq);
			continue;
		}

		/* Try backlog, compensating for double-counted bytes */
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (!sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim)))
			continue;

		/* Overload => reject message back to sender */
		onode = tipc_own_addr(sock_net(sk));
		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
			__skb_queue_tail(xmitq, skb);
		break;
	}
}
/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	u32 dnode, dport = 0;
	int err;
	struct tipc_sock *tsk;
	struct sock *sk;
	struct sk_buff *skb;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

		if (likely(tsk)) {
			sk = &tsk->sk;
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
				spin_unlock_bh(&sk->sk_lock.slock);
			}
			/* Send pending response/rejected messages, if any */
			while ((skb = __skb_dequeue(&xmitq))) {
				dnode = msg_destnode(buf_msg(skb));
				tipc_node_xmit_skb(net, skb, dnode, dport);
			}
			sock_put(sk);
			continue;
		}

		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);
		if (!skb)
			return;

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))
			goto xmit;

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
			continue;
xmit:
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
	}
}
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}
/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	socket_state previous;
	int res = 0;

	lock_sock(sk);

	/* DGRAM/RDM connect(), just save the destaddr */
	if (tipc_sk_type_connectionless(sk)) {
		if (dst->family == AF_UNSPEC) {
			memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
		} else if (destlen != sizeof(struct sockaddr_tipc)) {
			res = -EINVAL;
		} else {
			memcpy(&tsk->peer, dest, destlen);
		}
		goto exit;
	}

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 *       so there's no need to do it here
	 */
	if (dst->addrtype == TIPC_ADDR_MCAST) {
		res = -EINVAL;
		goto exit;
	}

	previous = sock->state;

	switch (sk->sk_state) {
	case TIPC_OPEN:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate that send_msg() must not block.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered SS_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		break;
	}

	switch (sock->state) {
	case SS_CONNECTING:
		if (previous == SS_CONNECTING)
			res = -EALREADY;
		if (!timeout)
			goto exit;
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case SS_CONNECTED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
		break;
	}

exit:
	release_sock(sk);
	return res;
}
/**
 * tipc_listen - allow socket to listen for incoming connections
 * @sock: socket structure
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_listen(struct socket *sock, int len)
{
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);
	res = tipc_set_sk_state(sk, TIPC_LISTEN);
	release_sock(sk);

	return res;
}
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}
/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @newsock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sk->sk_state != TIPC_LISTEN) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0);
	if (res)
		goto exit;
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to its peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
	new_sock->state = SS_CONNECTED;

	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		__tipc_send_stream(new_sock, &m, 0);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}
/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
	sk->sk_shutdown = SEND_SHUTDOWN;

	if (sk->sk_state == TIPC_DISCONNECTING) {
		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
	} else {
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}
static void tipc_sk_timeout(unsigned long data)
{
	struct tipc_sock *tsk = (struct tipc_sock *)data;
	struct sock *sk = &tsk->sk;
	struct sk_buff *skb = NULL;
	u32 peer_port, peer_node;
	u32 own_node = tsk_own_node(tsk);

	bh_lock_sock(sk);
	if (!tipc_sk_connected(sk)) {
		bh_unlock_sock(sk);
		goto exit;
	}
	peer_port = tsk_peer_port(tsk);
	peer_node = tsk_peer_node(tsk);

	if (tsk->probe_unacked) {
		if (!sock_owned_by_user(sk)) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
					      tsk_peer_port(tsk));
			sk->sk_state_change(sk);
		} else {
			/* Try again later */
			sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
		}

		bh_unlock_sock(sk);
		goto exit;
	}

	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
			      INT_H_SIZE, 0, peer_node, own_node,
			      peer_port, tsk->portid, TIPC_OK);
	tsk->probe_unacked = true;
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
	bh_unlock_sock(sk);
	if (skb)
		tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
exit:
	sock_put(sk);
}
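
/* Editorial summary of the keepalive above: every CONN_PROBING_INTERVAL
 * (one hour by default, see the #define near the top of this file) the
 * timer sends a CONN_PROBE and sets probe_unacked; if no traffic from the
 * peer has cleared that flag by the next expiry, the connection is torn
 * down (or the check is retried shortly if the socket is user-locked).
 */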
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct publication *publ;
	u32 key;

	if (tipc_sk_connected(sk))
		return -EINVAL;
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->pport_list, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
		if (seq) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				break;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->ref, publ->key);
			rc = 0;
			break;
		}
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->ref, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}
/* tipc_sk_reinit: set non-zero address in all existing sockets
 * when we go from standalone to network mode.
 */
void tipc_sk_reinit(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;
	int i;

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (i = 0; i < tbl->size; i++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			msg = &tsk->phdr;
			msg_set_prevnode(msg, tn->own_addr);
			msg_set_orignode(msg, tn->own_addr);
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
	rcu_read_unlock();
}
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	rcu_read_lock();
	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();

	return tsk;
}
2364 static int tipc_sk_insert(struct tipc_sock *tsk)
2366 struct sock *sk = &tsk->sk;
2367 struct net *net = sock_net(sk);
2368 struct tipc_net *tn = net_generic(net, tipc_net_id);
2369 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2370 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2372 while (remaining--) {
2374 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2375 portid = TIPC_MIN_PORT;
2376 tsk->portid = portid;
2377 sock_hold(&tsk->sk);
2378 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
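/* Note (descriptive comment): port ids are allocated starting from a
 * random value and probed linearly, wrapping around within
 * [TIPC_MIN_PORT, TIPC_MAX_PORT]; the rhashtable insertion itself doubles
 * as the uniqueness check, so no separate lookup is needed before
 * claiming an id.
 */
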
static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}

static const struct rhashtable_params tsk_rht_params = {
	.nelem_hint = 192,
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
};

int tipc_sk_rht_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
}

void tipc_sk_rht_destroy(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */
	synchronize_net();

	rhashtable_destroy(&tn->sk_rht);
}

/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	if (ol < sizeof(value))
		return -EINVAL;
	res = get_user(value, (u32 __user *)ov);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tsk_set_importance(tsk, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		/* no need to set "res", since already 0 at this point */
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}

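/* Usage sketch (illustrative user-space code, not part of this file):
 * raising the connect timeout from the 8 s default to 10 s; the value
 * is given in milliseconds:
 *
 *	__u32 tmo = 10000;
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */
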
/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP
 * options (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	int len;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}

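/* Usage sketch (illustrative user-space code, not part of this file):
 * reading the current receive queue depth; on success the kernel writes
 * a __u32 and sets the length accordingly:
 *
 *	__u32 depth;
 *	socklen_t len = sizeof(depth);
 *
 *	getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len);
 */
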
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		if (!tipc_node_get_linkname(sock_net(sk),
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

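/* Usage sketch (illustrative user-space code, not part of this file;
 * 'peer_node_addr' is a hypothetical variable): resolving the name of
 * the link to a given peer over a given bearer:
 *
 *	struct tipc_sioc_ln_req req = {
 *		.peer = peer_node_addr,
 *		.bearer_id = 0,
 *	};
 *
 *	if (!ioctl(sd, SIOCGETLINKNAME, &req))
 *		printf("link: %s\n", req.linkname);
 */
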
/* Protocol switches for the various types of TIPC sockets */

static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_stream,
	.recvmsg	= tipc_recv_stream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};

static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};

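/* Usage sketch (illustrative user-space code, not part of this file):
 * the three ops tables above map onto the socket types an application
 * can create, as selected by tipc_sk_create():
 *
 *	int rdm  = socket(AF_TIPC, SOCK_RDM, 0);	// msg_ops
 *	int dgrm = socket(AF_TIPC, SOCK_DGRAM, 0);	// msg_ops
 *	int pkt  = socket(AF_TIPC, SOCK_SEQPACKET, 0);	// packet_ops
 *	int strm = socket(AF_TIPC, SOCK_STREAM, 0);	// stream_ops
 */
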
/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_socket_init(void)
{
	int res;

	res = proto_register(&tipc_proto, 1);
	if (res) {
		pr_err("Failed to register TIPC protocol type\n");
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
		goto out;
	}
 out:
	return res;
}

/**
 * tipc_socket_stop - stop TIPC socket interface
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

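/* Resulting attribute layout (descriptive sketch of the code above):
 * connection info is nested under TIPC_NLA_SOCK_CON:
 *
 *	TIPC_NLA_SOCK_CON
 *	 +- TIPC_NLA_CON_NODE	(u32, peer node)
 *	 +- TIPC_NLA_CON_SOCK	(u32, peer port)
 *	 +- TIPC_NLA_CON_FLAG	(flag, by-name connections only)
 *	 +- TIPC_NLA_CON_TYPE	(u32, service type, likewise)
 *	 +- TIPC_NLA_CON_INST	(u32, service instance, likewise)
 */
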
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sock *sk = &tsk->sk;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
		goto attr_msg_cancel;

	if (tipc_sk_connected(sk)) {
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 tbl_id = cb->args[0];
	u32 prev_portid = cb->args[1];

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				continue;
			}

			err = __tipc_nl_add_sk(skb, cb, tsk);
			if (err) {
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				goto out;
			}
			prev_portid = 0;
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
out:
	rcu_read_unlock();
	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;

	return skb->len;
}

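/* Note (descriptive comment): the dump is resumable. cb->args[0] records
 * the hash bucket to continue from and cb->args[1] the port id of the
 * socket that did not fit into the previous skb; on re-entry, entries in
 * that bucket are skipped until the recorded socket is seen again.
 */
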
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &tsk->publications, pport_list) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent();
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler, resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     pport_list);
	}

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}

int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}