/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/rhashtable.h>
#include "name_table.h"
#include "name_distr.h"

#define SS_LISTENING		-1	/* socket is listening */
#define SS_READY		-2	/* socket is connectionless */

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTERVAL	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_CONN_OK		0
#define TIPC_CONN_PROBING	1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @connected: non-zero if port is currently connected to a peer port
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @port_list: adjacent ports in TIPC's global list of ports
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @link_cong: non-zero if owner must sleep because of link congestion
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @remote: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @rcu: rcu struct for tipc_sock
 */
	struct list_head sock_list;
	struct list_head publications;
	unsigned long probing_intv;
	struct sockaddr_tipc remote;
	struct rhash_head node;
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
static void tipc_sk_timeout(unsigned long data);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;

static const struct rhashtable_params tsk_rht_params;
/*
 * Revised TIPC socket locking policy:
 *
 * Most socket operations take the standard socket lock when they start
 * and hold it until they finish (or until they need to sleep). Acquiring
 * this lock grants the owner exclusive access to the fields of the socket
 * data structures, with the exception of the backlog queue. A few socket
 * operations can be done without taking the socket lock because they only
 * read socket information that never changes during the life of the socket.
 *
 * Socket operations may acquire the lock for the associated TIPC port if they
 * need to perform an operation on the port. If any routine needs to acquire
 * both the socket lock and the port lock it must take the socket lock first
 * to avoid the risk of deadlock.
 *
 * The dispatcher handling incoming messages cannot grab the socket lock in
 * the standard fashion, since it runs at BH level and cannot block.
 * Instead, it checks to see if the socket lock is currently owned by someone,
 * and either handles the message itself or adds it to the socket's backlog
 * queue; in the latter case the queued message is processed once the process
 * owning the socket lock releases it.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
 * the problem of a blocked socket operation preventing any other operations
 * from occurring. However, applications must be careful if they have
 * multiple threads trying to send (or receive) on the same socket, as these
 * operations might interfere with each other. For example, doing a connect
 * and a receive at the same time might allow the receive to consume the
 * ACK message meant for the connect. While additional work could be done
 * to try and overcome this, it doesn't seem to be worthwhile at present.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
 * that another operation that must be performed in a non-blocking manner is
 * not delayed for very long because the lock has already been taken.
 *
 * NOTE: This code assumes that certain fields of a port/socket pair are
 * constant over its lifetime; such fields can be examined without taking
 * the socket lock and/or port lock, and do not need to be re-read even
 * after resuming processing after waiting. These fields include:
 *
 * - pointer to socket sk structure (aka tipc_sock structure)
 * - pointer to port structure
 */
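/*
 * Illustrative sketch of the dispatcher pattern described above. This is
 * not part of the build; it condenses what tipc_sk_rcv()/tipc_sk_enqueue()
 * further down actually do (names and the "limit" value are abbreviations):
 *
 *	if (spin_trylock_bh(&sk->sk_lock.slock)) {
 *		if (!sock_owned_by_user(sk))
 *			filter_rcv(sk, skb);		 // handle at BH level
 *		else
 *			sk_add_backlog(sk, skb, limit);	 // defer to lock owner
 *		spin_unlock_bh(&sk->sk_lock.slock);
 *	}
 */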
static u32 tsk_own_node(struct tipc_sock *tsk)
	return msg_prevnode(&tsk->phdr);

static u32 tsk_peer_node(struct tipc_sock *tsk)
	return msg_destnode(&tsk->phdr);

static u32 tsk_peer_port(struct tipc_sock *tsk)
	return msg_destport(&tsk->phdr);

static bool tsk_unreliable(struct tipc_sock *tsk)
	return msg_src_droppable(&tsk->phdr) != 0;

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);

static bool tsk_unreturnable(struct tipc_sock *tsk)
	return msg_dest_droppable(&tsk->phdr) != 0;

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);

static int tsk_importance(struct tipc_sock *tsk)
	return msg_importance(&tsk->phdr);

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
	if (imp > TIPC_CRITICAL_IMPORTANCE)
	msg_set_importance(&tsk->phdr, (u32)imp);

static struct tipc_sock *tipc_sk(const struct sock *sk)
	return container_of(sk, struct tipc_sock, sk);

static bool tsk_conn_cong(struct tipc_sock *tsk)
	return tsk->snt_unacked >= tsk->snd_win;
/* tsk_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
	return len / FLOWCTL_BLK_SZ / 4;

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based flow control, incrementing the counter
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
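/*
 * Worked example of the block arithmetic above (assuming FLOWCTL_BLK_SZ is
 * 1024 bytes; illustrative only): a 64 KB receive buffer advertises
 * tsk_adv_blocks(65536) = 65536 / 1024 / 4 = 16 blocks, and a 5000 byte
 * message to/from a block-capable peer costs tsk_inc() = 5000 / 1024 + 1 =
 * 5 blocks. A peer without TIPC_BLOCK_FLOWCTL is charged per message
 * instead.
 */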
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));

/* tipc_sk_respond(): send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))

	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
	struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id);
	u32 peer_port = tsk_peer_port(tsk);

	if (unlikely(!tsk->connected))
	if (unlikely(msg_origport(msg) != peer_port))

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
	if (!orig_node && (peer_node == tn->own_addr))
	if (!peer_node && (orig_node == tn->own_addr))
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
	const struct proto_ops *ops;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
		state = SS_UNCONNECTED;
		state = SS_UNCONNECTED;

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);

	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	tn = net_generic(sock_net(sk), tipc_net_id);
	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,

	/* Finish initializing socket data structures */
	sock_init_data(sock, sk);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
	msg_set_origport(msg, tsk->portid);
	setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
	sk->sk_backlog_rcv = tipc_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (sock->state == SS_READY) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
static void tipc_sk_callback(struct rcu_head *head)
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */

	/*
	 * Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer)
	 */
	dnode = tsk_peer_node(tsk);
	while (sock->state != SS_DISCONNECTING) {
		skb = __skb_dequeue(&sk->sk_receive_queue);
		if (TIPC_SKB_CB(skb)->handle != NULL)
		if ((sock->state == SS_CONNECTING) ||
		    (sock->state == SS_CONNECTED)) {
			sock->state = SS_DISCONNECTING;
			tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);

	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	if (tsk->connected) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, TIPC_ERR_NO_PORT);
		tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);

	/* Reject any messages that accumulated in backlog queue */
	sock->state = SS_DISCONNECTING;

	call_rcu(&tsk->rcu, tipc_sk_callback);
/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);

	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);

	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
	if (addr->family != AF_TIPC) {

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {

	res = (addr->scope > 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
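/*
 * Illustrative user-space counterpart of tipc_bind() (not part of the
 * kernel build; the service type and instance range are made-up values):
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,	// positive scope => publish
 *	};
 *	a.addr.nameseq.type = 1000;
 *	a.addr.nameseq.lower = 0;
 *	a.addr.nameseq.upper = 99;
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));
 */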
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int *uaddr_len, int peer)
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sock->sk);
	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);

	memset(addr, 0, sizeof(*addr));
		if ((sock->state != SS_CONNECTED) &&
		    ((peer != 2) || (sock->state != SS_DISCONNECTING)))
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tn->own_addr;

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->addr.name.domain = 0;

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 *
 * Returns pollmask value
 *
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits. TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * TIPC sets the returned events as follows:
 *
 * socket state		flags set
 * ------------		---------
 * unconnected		no read flags
 *			POLLOUT if port is not congested
 *
 * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
 *
 * connected		POLLIN/POLLRDNORM if data in rx queue
 *			POLLOUT if port is not congested
 *
 * disconnecting	POLLIN/POLLRDNORM/POLLHUP
 *
 * listening		POLLIN if SYN in rx queue
 *
 * ready		POLLIN/POLLRDNORM if data in rx queue
 * [connectionless]	POLLOUT (since port cannot be congested)
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
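/*
 * Illustrative user-space use of the table above (not part of the build):
 * wait until the socket is readable or writable, then act on the bits.
 *
 *	struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLOUT };
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLIN)
 *		;	// a receive should now complete without blocking
 */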
static unsigned int tipc_poll(struct file *file, struct socket *sock,
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	sock_poll_wait(file, sk_sleep(sk), wait);

	switch ((int)sock->state) {
		if (!tsk->link_cong && !tsk_conn_cong(tsk))
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
	case SS_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dsz: total length of message data
 * @timeo: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dsz, long timeo)
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	struct sk_buff_head pktchain;
	struct iov_iter save = msg->msg_iter;

	msg_set_type(mhdr, TIPC_MCAST_MSG);
	msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(mhdr, 0);
	msg_set_destnode(mhdr, 0);
	msg_set_nametype(mhdr, seq->type);
	msg_set_namelower(mhdr, seq->lower);
	msg_set_nameupper(mhdr, seq->upper);
	msg_set_hdr_sz(mhdr, MCAST_H_SIZE);

	skb_queue_head_init(&pktchain);

	mtu = tipc_bcast_get_mtu(net);
	rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
	if (unlikely(rc < 0))

		rc = tipc_bcast_xmit(net, &pktchain);
		if (rc == -ELINKCONG) {
			rc = tipc_wait_for_sndmsg(sock, &timeo);
		__skb_queue_purge(&pktchain);
		if (rc == -EMSGSIZE) {
			msg->msg_iter = save;
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
	struct tipc_msg *msg;
	struct tipc_plist dports;
	u32 scope = TIPC_CLUSTER_SCOPE;
	struct sk_buff_head tmpq;
	struct sk_buff *skb, *_skb;

	__skb_queue_head_init(&tmpq);
	tipc_plist_init(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hsz = skb_headroom(skb) + msg_hdr_sz(msg);
		if (in_own_node(net, msg_orignode(msg)))
			scope = TIPC_NODE_SCOPE;

		/* Create destination port list and message clones: */
		tipc_nametbl_mc_translate(net,
					  msg_nametype(msg), msg_namelower(msg),
					  msg_nameupper(msg), scope, &dports);
		portid = tipc_plist_pop(&dports);
		for (; portid; portid = tipc_plist_pop(&dports)) {
			_skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
			pr_warn("Failed to clone mcast rcv buffer\n");

		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);

	tipc_sk_rcv(net, inputq);
/**
 * tipc_sk_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 */
static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
	struct sock *sk = &tsk->sk;
	struct tipc_msg *hdr = buf_msg(skb);
	int mtyp = msg_type(hdr);

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr))

	tsk->probing_state = TIPC_CONN_OK;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		tipc_sk_respond(sk, skb, TIPC_OK);
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

		int err = sock_error(sk);

		if (sock->state == SS_DISCONNECTING)
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
		finish_wait(sk_sleep(sk), &wait);
/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
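/*
 * Illustrative user-space counterpart (not part of the build; type and
 * instance are made-up values): sending a datagram to a service name,
 * which this function resolves via the name table.
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *	};
 *	dst.addr.name.name.type = 1000;
 *	dst.addr.name.name.instance = 17;
 *	dst.addr.name.domain = 0;	// lookup scope: whole zone
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */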
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
	struct sock *sk = sock->sk;

	ret = __tipc_sendmsg(sock, m, dsz);

static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	struct sk_buff_head pktchain;
	struct tipc_name_seq *seq;
	struct iov_iter save;

	if (dsz > TIPC_MAX_USER_MSG_SIZE)
	if (unlikely(!dest)) {
		if (tsk->connected && sock->state == SS_READY)
			return -EDESTADDRREQ;
	} else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
		   dest->family != AF_TIPC) {

	if (unlikely(sock->state != SS_READY)) {
		if (sock->state == SS_LISTENING)
		if (sock->state != SS_UNCONNECTED)
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;

	seq = &dest->addr.nameseq;
	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	if (dest->addrtype == TIPC_ADDR_MCAST) {
		return tipc_sendmcast(sock, seq, m, dsz, timeo);
	} else if (dest->addrtype == TIPC_ADDR_NAME) {
		u32 type = dest->addr.name.name.type;
		u32 inst = dest->addr.name.name.instance;
		u32 domain = dest->addr.name.domain;

		msg_set_type(mhdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
		msg_set_nametype(mhdr, type);
		msg_set_nameinst(mhdr, inst);
		msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(mhdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(mhdr, 0);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dest->addr.id.ref);
		msg_set_hdr_sz(mhdr, BASIC_H_SIZE);

	skb_queue_head_init(&pktchain);

	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);

		skb = skb_peek(&pktchain);
		TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
		rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
			if (sock->state != SS_READY)
				sock->state = SS_CONNECTING;
		if (rc == -ELINKCONG) {
			rc = tipc_wait_for_sndmsg(sock, &timeo);
		__skb_queue_purge(&pktchain);
		if (rc == -EMSGSIZE) {
static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

		int err = sock_error(sk);

		if (sock->state == SS_DISCONNECTING)
		else if (sock->state != SS_CONNECTED)
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p,
				      !tsk_conn_cong(tsk)) ||
		finish_wait(sk_sleep(sk), &wait);

/**
 * tipc_send_stream - send stream-oriented data
 * @sock: socket structure
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
	struct sock *sk = sock->sk;

	ret = __tipc_send_stream(sock, m, dsz);

static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	struct sk_buff_head pktchain;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	u32 portid = tsk->portid;
	uint mtu, send, sent = 0;
	struct iov_iter save;
	int hlen = MIN_H_SIZE;

	/* Handle implied connection establishment */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dsz);
		hlen = msg_hdr_sz(mhdr);
		if (dsz && (dsz == rc))
			tsk->snt_unacked = tsk_inc(tsk, dsz + hlen);

	if (dsz > (uint)INT_MAX)

	if (unlikely(sock->state != SS_CONNECTED)) {
		if (sock->state == SS_DISCONNECTING)

	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	dnode = tsk_peer_node(tsk);
	skb_queue_head_init(&pktchain);

	send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
	rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
	if (unlikely(rc < 0))

		if (likely(!tsk_conn_cong(tsk))) {
			rc = tipc_node_xmit(net, &pktchain, dnode, portid);
				tsk->snt_unacked += tsk_inc(tsk, send + hlen);
			if (rc == -EMSGSIZE) {
				__skb_queue_purge(&pktchain);
				tsk->max_pkt = tipc_node_get_mtu(net, dnode,
			if (rc != -ELINKCONG)
		rc = tipc_wait_for_sndpkt(sock, &timeo);

	__skb_queue_purge(&pktchain);
	return sent ? sent : rc;

/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
	if (dsz > TIPC_MAX_USER_MSG_SIZE)

	return tipc_send_stream(sock, m, dsz);
/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	tsk->probing_intv = CONN_PROBING_INTERVAL;
	tsk->probing_state = TIPC_CONN_OK;
	sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
/**
 * set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @msg: received message header
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
	DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);

		addr->family = AF_TIPC;
		addr->addrtype = TIPC_ADDR_ID;
		memset(&addr->addr, 0, sizeof(addr->addr));
		addr->addr.id.ref = msg_origport(msg);
		addr->addr.id.node = msg_orignode(msg);
		addr->addr.name.domain = 0;	/* could leave uninitialized */
		addr->scope = 0;		/* could leave uninitialized */
		m->msg_namelen = sizeof(struct sockaddr_tipc);
/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
	if (likely(m->msg_controllen == 0))

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
	case TIPC_MCAST_MSG:
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
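/*
 * Illustrative user-space retrieval of this ancillary data (not part of
 * the build; error handling and the data iovec are omitted):
 *
 *	char ctrl[CMSG_SPACE(12)];
 *	struct msghdr m = { .msg_control = ctrl,
 *			    .msg_controllen = sizeof(ctrl) };
 *	recvmsg(sd, &m, 0);
 *	for (struct cmsghdr *c = CMSG_FIRSTHDR(&m); c;
 *	     c = CMSG_NXTHDR(&m, c))
 *		if (c->cmsg_level == SOL_TIPC && c->cmsg_type == TIPC_DESTNAME)
 *			;	// CMSG_DATA(c): u32 {type, lower, upper}
 */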
static void tipc_sk_send_ack(struct tipc_sock *tsk)
	struct net *net = sock_net(&tsk->sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tsk->connected)

	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);

	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);

	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
	struct sock *sk = sock->sk;
	long timeo = *timeop;

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sock->state == SS_DISCONNECTING) {
			timeo = schedule_timeout(timeo);
		if (!skb_queue_empty(&sk->sk_receive_queue))
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
	finish_wait(sk_sleep(sk), &wait);
/**
 * tipc_recvmsg - receive packet-oriented message
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;

	/* Catch invalid receive requests */
	if (unlikely(!buf_len))

	if (unlikely(sock->state == SS_UNCONNECTED)) {

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	sz = msg_data_sz(msg);
	hlen = msg_hdr_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
	res = tipc_sk_anc_data_recv(m, msg, tsk);

	/* Capture message data (if valid) & compute return value (always) */
	if (unlikely(buf_len < sz)) {
		m->msg_flags |= MSG_TRUNC;
	res = skb_copy_datagram_msg(buf, hlen, m, sz);

	if ((sock->state == SS_READY) ||
	    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))

	if (unlikely(flags & MSG_PEEK))

	if (likely(sock->state != SS_READY)) {
		tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
		if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
			tipc_sk_send_ack(tsk);
	tsk_advance_rx_queue(sk);
/**
 * tipc_recv_stream - receive stream-oriented data
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available,
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
			    size_t buf_len, int flags)
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	int sz_to_copy, target, needed;

	/* Catch invalid receive attempts */
	if (unlikely(!buf_len))

	if (unlikely(sock->state == SS_UNCONNECTED)) {

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	sz = msg_data_sz(msg);
	hlen = msg_hdr_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = tipc_sk_anc_data_recv(m, msg, tsk);

	/* Capture message data (if valid) & compute return value (always) */
		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);

		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;

		res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy);

		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle =
				(void *)(unsigned long)(offset + sz_to_copy);
			goto exit; /* can't add error msg to valid data */
		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)

	if (unlikely(flags & MSG_PEEK))

	tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
	if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
		tipc_sk_send_ack(tsk);
	tsk_advance_rx_queue(sk);

	/* Loop around if more data is required */
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sz_copied < target)) &&	/* and more is ready or required */
	    (!err))			/* and haven't reached a FIN */

	return sz_copied ? sz_copied : res;
/**
 * tipc_write_space - wake up thread if port congestion is released
 */
static void tipc_write_space(struct sock *sk)
	struct socket_wq *wq;

	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 */
static void tipc_data_ready(struct sock *sk)
	struct socket_wq *wq;

	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);

static void tipc_sock_destruct(struct sock *sk)
	__skb_queue_purge(&sk->sk_receive_queue);
/**
 * filter_connect - Handle all incoming messages for a connection-based socket
 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
 *
 * Returns true if everything ok, false otherwise
 */
static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct socket *sock = sk->sk_socket;
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(msg_mcast(hdr)))

	switch ((int)sock->state) {
		/* Accept only connection-based messages sent by peer */
		if (unlikely(!tsk_peer_msg(tsk, hdr)))

		if (unlikely(msg_errcode(hdr))) {
			sock->state = SS_DISCONNECTING;
			/* Let timer expire on its own */
			tipc_node_remove_conn(net, tsk_peer_node(tsk),

		/* Accept only ACK or NACK message */
		if (unlikely(!msg_connected(hdr)))

		if (unlikely(msg_errcode(hdr))) {
			sock->state = SS_DISCONNECTING;
			sk->sk_err = ECONNREFUSED;

		if (unlikely(!msg_isdata(hdr))) {
			sock->state = SS_DISCONNECTING;
			sk->sk_err = EINVAL;

		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
		msg_set_importance(&tsk->phdr, msg_importance(hdr));
		sock->state = SS_CONNECTED;

		/* If 'ACK+' message, add to socket receive queue */
		if (msg_data_sz(hdr))

		/* If empty 'ACK-' message, wake up sleeping connect() */
		if (waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));

		/* 'ACK-' message is neither accepted nor rejected: */
		msg_set_dest_droppable(hdr, 1);

	case SS_UNCONNECTED:
		/* Accept only SYN message */
		if (!msg_connected(hdr) && !(msg_errcode(hdr)))

	case SS_DISCONNECTING:
		pr_err("Unknown socket state %u\n", sock->state);
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 *
 * For connection oriented messages, irrespective of importance,
 * default queue limit is 2 MB.
 *
 * For connectionless messages, queue limits are based on message
 * importance as follows:
 *
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(!msg_connected(hdr)))
		return sk->sk_rcvbuf << msg_importance(hdr);

	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return sk->sk_rcvbuf;

	return FLOWCTL_MSG_LIM;
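/*
 * Worked example (illustrative; the table above implies a base sk_rcvbuf
 * of 2 MB): a connectionless message of TIPC_CRITICAL_IMPORTANCE
 * (importance level 3) may queue up to 2 MB << 3 = 16 MB, while a
 * connection from a block-flow-control peer is bounded by sk_rcvbuf
 * itself.
 */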
/**
 * filter_rcv - validate incoming message
 * @skb: pointer to message.
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 *
 * Returns true if message was added to socket receive queue, otherwise false
 */
static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
	struct socket *sock = sk->sk_socket;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	unsigned int limit = rcvbuf_limit(sk, skb);
	int usr = msg_user(hdr);

	if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
		tipc_sk_proto_rcv(tsk, skb);

	if (unlikely(usr == SOCK_WAKEUP)) {
		sk->sk_write_space(sk);

	/* Drop if illegal message type */
	if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {

	/* Reject if wrong message type for current socket state */
	if (unlikely(sock->state == SS_READY)) {
		if (msg_connected(hdr)) {
			err = TIPC_ERR_NO_PORT;
	} else if (unlikely(!filter_connect(tsk, skb))) {
		err = TIPC_ERR_NO_PORT;

	/* Reject message if there isn't room to queue it */
	if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
		err = TIPC_ERR_OVERLOAD;

	/* Enqueue message */
	TIPC_SKB_CB(skb)->handle = NULL;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk);

	tipc_sk_respond(sk, skb, err);

/**
 * tipc_backlog_rcv - handle incoming message from backlog queue
 *
 * Caller must hold socket lock
 */
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
	unsigned int truesize = skb->truesize;

	if (likely(filter_rcv(sk, skb)))
		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
	struct sk_buff *skb;
	unsigned long time_limit = jiffies + 2;

	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))

		skb = tipc_skb_dequeue(inputq, dport);

		/* Add message directly to receive queue if possible */
		if (!sock_owned_by_user(sk)) {
			filter_rcv(sk, skb);

		/* Try backlog, compensating for double-counted bytes */
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (!sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim)))

		/* Overload => reject message back to sender */
		tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
	u32 dnode, dport = 0;
	struct tipc_sock *tsk;
	struct sk_buff *skb;

	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

		if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
			tipc_sk_enqueue(inputq, sk, dport);
			spin_unlock_bh(&sk->sk_lock.slock);

		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))

		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
	struct sock *sk = sock->sk;

		int err = sock_error(sk);
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
		finish_wait(sk_sleep(sk), &wait);
/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	socket_state previous;

	/* DGRAM/RDM connect(), just save the destaddr */
	if (sock->state == SS_READY) {
		if (dst->family == AF_UNSPEC) {
			memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc));
		} else if (destlen != sizeof(struct sockaddr_tipc)) {
			memcpy(&tsk->remote, dest, destlen);

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 * so there's no need to do it here
	 */
	if (dst->addrtype == TIPC_ADDR_MCAST) {

	previous = sock->state;
	switch (sock->state) {
	case SS_UNCONNECTED:
		/* Send a 'SYN-' to destination */
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
			m.msg_flags = MSG_DONTWAIT;

		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))

		/* Just entered SS_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		if (previous == SS_CONNECTING)

		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
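/*
 * Illustrative user-space counterpart of tipc_connect() (not part of the
 * build; type/instance are made-up values): connecting a SOCK_STREAM
 * socket to a named server. The wait above uses tsk->conn_timeout unless
 * O_NONBLOCK was set on the socket.
 *
 *	struct sockaddr_tipc srv = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *	};
 *	srv.addr.name.name.type = 1000;
 *	srv.addr.name.name.instance = 17;
 *	connect(sd, (struct sockaddr *)&srv, sizeof(srv));
 */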
/**
 * tipc_listen - allow socket to listen for incoming connections
 * @sock: socket structure
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_listen(struct socket *sock, int len)
	struct sock *sk = sock->sk;

	if (sock->state != SS_UNCONNECTED)
	sock->state = SS_LISTENING;

static int tipc_wait_for_accept(struct socket *sock, long timeo)
	struct sock *sk = sock->sk;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 */
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			timeo = schedule_timeout(timeo);
		if (!skb_queue_empty(&sk->sk_receive_queue))
		if (sock->state != SS_LISTENING)
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
	finish_wait(sk_sleep(sk), &wait);
/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @newsock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;

	if (sock->state != SS_LISTENING) {

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to its peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
	new_sock->state = SS_CONNECTED;

	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		__tipc_send_stream(new_sock, &m, 0);
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	release_sock(new_sk);
/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	u32 dnode = tsk_peer_node(tsk);
	u32 dport = tsk_peer_port(tsk);
	u32 onode = tipc_own_addr(net);
	u32 oport = tsk->portid;

	if (how != SHUT_RDWR)

	switch (sock->state) {
		dnode = tsk_peer_node(tsk);

		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
		skb = __skb_dequeue(&sk->sk_receive_queue);
			if (TIPC_SKB_CB(skb)->handle != NULL) {
			tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
			skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
					      TIPC_CONN_MSG, SHORT_H_SIZE,
					      0, dnode, onode, dport, oport,
					      TIPC_CONN_SHUTDOWN);
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		sock->state = SS_DISCONNECTING;
		tipc_node_remove_conn(net, dnode, tsk->portid);

	case SS_DISCONNECTING:
		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
static void tipc_sk_timeout(unsigned long data)
	struct tipc_sock *tsk = (struct tipc_sock *)data;
	struct sock *sk = &tsk->sk;
	struct sk_buff *skb = NULL;
	u32 peer_port, peer_node;
	u32 own_node = tsk_own_node(tsk);

	if (!tsk->connected) {

	peer_port = tsk_peer_port(tsk);
	peer_node = tsk_peer_node(tsk);

	if (tsk->probing_state == TIPC_CONN_PROBING) {
		if (!sock_owned_by_user(sk)) {
			sk->sk_socket->state = SS_DISCONNECTING;
			tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
					      tsk_peer_port(tsk));
			sk->sk_state_change(sk);
			/* Try again later */
			sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));

		skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
				      INT_H_SIZE, 0, peer_node, own_node,
				      peer_port, tsk->portid, TIPC_OK);
		tsk->probing_state = TIPC_CONN_PROBING;
		sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);

	tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;

	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))

	list_add(&publ->pport_list, &tsk->publications);

static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;

	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
			if (publ->scope != scope)
			if (publ->type != seq->type)
			if (publ->lower != seq->lower)
			if (publ->upper != seq->upper)
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->ref, publ->key);
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->ref, publ->key);

	if (list_empty(&tsk->publications))
/* tipc_sk_reinit: set non-zero address in all existing sockets
 * when we go from standalone to network mode.
 */
void tipc_sk_reinit(struct net *net)
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (i = 0; i < tbl->size; i++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			msg_set_prevnode(msg, tn->own_addr);
			msg_set_orignode(msg, tn->own_addr);
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
		sock_hold(&tsk->sk);

static int tipc_sk_insert(struct tipc_sock *tsk)
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;

	while (remaining--) {
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		sock_hold(&tsk->sk);
		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,

static void tipc_sk_remove(struct tipc_sock *tsk)
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);

static const struct rhashtable_params tsk_rht_params = {
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.automatic_shrinking = true,

int tipc_sk_rht_init(struct net *net)
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);

void tipc_sk_rht_destroy(struct net *net)
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */

	rhashtable_destroy(&tn->sk_rht);
/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	if (ol < sizeof(value))
		return -EINVAL;
	res = get_user(value, (u32 __user *)ov);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tsk_set_importance(tsk, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		/* no need to set "res", since already 0 at this point */
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}
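
/* Illustrative user-space sketch (not compiled here) of the SOL_TIPC
 * options handled above. All option values are plain 32-bit integers;
 * TIPC_CONN_TIMEOUT is expressed in milliseconds.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	int fd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *	__u32 timeout = 10000;		// 10 s connect timeout
 *	setsockopt(fd, SOL_TIPC, TIPC_CONN_TIMEOUT, &timeout, sizeof(timeout));
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *	setsockopt(fd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 */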
/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	int len;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}
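
/* Illustrative user-space counterpart of the above (not compiled here):
 * reading an option back; TIPC_SOCK_RECVQ_DEPTH reports the current
 * length of the socket's receive queue.
 *
 *	__u32 depth;
 *	socklen_t len = sizeof(depth);
 *	if (!getsockopt(fd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len))
 *		printf("%u messages queued\n", depth);
 */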
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		if (!tipc_node_get_linkname(sock_net(sk),
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
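
/* Illustrative user-space sketch (not compiled here) of the
 * SIOCGETLINKNAME ioctl served above; peer_node_addr is a hypothetical
 * variable and, together with bearer_id, must identify a real link.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/tipc.h>
 *
 *	struct tipc_sioc_ln_req req = {
 *		.peer = peer_node_addr,		// hypothetical peer address
 *		.bearer_id = 0,
 *	};
 *	if (!ioctl(fd, SIOCGETLINKNAME, &req))
 *		printf("link: %s\n", req.linkname);
 */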
/* Protocol switches for the various types of TIPC sockets */
static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};
static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};
static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_stream,
	.recvmsg	= tipc_recv_stream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};
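
/* How the three ops tables above map to user-visible socket types; the
 * actual selection happens in tipc_sk_create(), earlier in this file:
 *
 *	socket(AF_TIPC, SOCK_RDM, 0);		// msg_ops, reliable datagram
 *	socket(AF_TIPC, SOCK_DGRAM, 0);		// msg_ops, unreliable datagram
 *	socket(AF_TIPC, SOCK_SEQPACKET, 0);	// packet_ops, message-oriented
 *	socket(AF_TIPC, SOCK_STREAM, 0);	// stream_ops, byte stream
 */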
static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};
static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};
/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_socket_init(void)
{
	int res;

	res = proto_register(&tipc_proto, 1);
	if (res) {
		pr_err("Failed to register TIPC protocol type\n");
		goto out;
	}
	res = sock_register(&tipc_family_ops);
	if (res) {
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
	}
 out:
	return res;
}

/**
 * tipc_socket_stop - stop TIPC socket interface
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
	if (!nest)
		return -EMSGSIZE;
	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);
	return 0;

msg_full:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
		goto attr_msg_cancel;

	if (tsk->connected) {
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);
	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 tbl_id = cb->args[0];
	u32 prev_portid = cb->args[1];

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				continue;
			}
			err = __tipc_nl_add_sk(skb, cb, tsk);
			if (err) {
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				goto out;
			}
			prev_portid = 0;
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
out:
	rcu_read_unlock();
	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;

	return skb->len;
}
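
/* Resumption sketch for the dump above (illustrative): netlink re-invokes
 * the callback until it returns 0, so the cursor saved in cb->args lets
 * the walk restart at the first socket that did not fit in the last skb.
 *
 *	first call:	args = {0, 0}		// walk from bucket 0
 *	skb filled:	args = {i, portid}	// remember the stop point
 *	next call:	skip bucket i entries until portid matches,
 *			then continue adding sockets
 */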
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);
	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &tsk->publications, pport_list) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent()
			 * here, so setting prev_seq makes the consistency
			 * check fail in the netlink callback handler, and
			 * the final NLMSG_DONE message is then sent with
			 * the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     pport_list);
	}

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;
	return 0;
}
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}