/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/poll.h>
#include <net/sock.h>

#include "rds.h"
/* this is just used for stats gathering :/ */
static DEFINE_SPINLOCK(rds_sock_lock);
static unsigned long rds_sock_count;
static LIST_HEAD(rds_sock_list);
DECLARE_WAIT_QUEUE_HEAD(rds_poll_waitq);
/*
 * This is called as the final descriptor referencing this socket is closed.
 * We have to unbind the socket so that another socket can be bound to the
 * address it was using.
 *
 * We have to be careful about racing with the incoming path.  sock_orphan()
 * sets SOCK_DEAD and we use that as an indicator to the rx path that new
 * messages shouldn't be queued.
 */
static int rds_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs;

	if (!sk)
		goto out;
	rs = rds_sk_to_rs(sk);

	sock_orphan(sk);
	/* Note - rds_clear_recv_queue grabs rs_recv_lock, so
	 * that ensures the recv path has completed messing
	 * with the socket. */
	rds_clear_recv_queue(rs);
	rds_cong_remove_socket(rs);
	rds_remove_bound(rs);
	rds_send_drop_to(rs, NULL);
	rds_rdma_drop_keys(rs);
	rds_notify_queue_get(rs, NULL);
	rds_notify_msg_zcopy_purge(&rs->rs_zcookie_queue);

	spin_lock_bh(&rds_sock_lock);
	list_del_init(&rs->rs_item);
	rds_sock_count--;
	spin_unlock_bh(&rds_sock_lock);

	rds_trans_put(rs->rs_transport);
	sock->sk = NULL;
	sock_put(sk);
out:
	return 0;
}
/*
 * Careful not to race with rds_release -> sock_orphan which clears sk_sleep.
 * _bh() isn't OK here, we're called from interrupt handlers.  It's probably OK
 * to wake the waitqueue after sk_sleep is clear as we hold a sock ref, but
 * this seems more conservative.
 * NB - normally, one would use sk_callback_lock for this, but we can
 * get here from interrupts, whereas the network code grabs sk_callback_lock
 * with _lock_bh only - so relying on sk_callback_lock introduces livelocks.
 */
void rds_wake_sk_sleep(struct rds_sock *rs)
{
	unsigned long flags;

	read_lock_irqsave(&rs->rs_recv_lock, flags);
	__rds_wake_sk_sleep(rds_rs_to_sk(rs));
	read_unlock_irqrestore(&rs->rs_recv_lock, flags);
}
static int rds_getname(struct socket *sock, struct sockaddr *uaddr,
		       int peer)
{
	struct rds_sock *rs = rds_sk_to_rs(sock->sk);
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	int uaddr_len;

	/* racey, don't care */
	if (peer) {
		if (ipv6_addr_any(&rs->rs_conn_addr))
			return -ENOTCONN;

		if (ipv6_addr_v4mapped(&rs->rs_conn_addr)) {
			sin = (struct sockaddr_in *)uaddr;
			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
			sin->sin_family = AF_INET;
			sin->sin_port = rs->rs_conn_port;
			sin->sin_addr.s_addr = rs->rs_conn_addr_v4;
			uaddr_len = sizeof(*sin);
		} else {
			sin6 = (struct sockaddr_in6 *)uaddr;
			sin6->sin6_family = AF_INET6;
			sin6->sin6_port = rs->rs_conn_port;
			sin6->sin6_addr = rs->rs_conn_addr;
			sin6->sin6_flowinfo = 0;
			/* scope_id is the same as in the bound address. */
			sin6->sin6_scope_id = rs->rs_bound_scope_id;
			uaddr_len = sizeof(*sin6);
		}
	} else {
		/* If socket is not yet bound, set the return address family
		 * to be AF_UNSPEC (value 0) and the address size to be that
		 * of an IPv4 address.
		 */
		if (ipv6_addr_any(&rs->rs_bound_addr)) {
			sin = (struct sockaddr_in *)uaddr;
			memset(sin, 0, sizeof(*sin));
			sin->sin_family = AF_UNSPEC;
			return sizeof(*sin);
		}
		if (ipv6_addr_v4mapped(&rs->rs_bound_addr)) {
			sin = (struct sockaddr_in *)uaddr;
			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
			sin->sin_family = AF_INET;
			sin->sin_port = rs->rs_bound_port;
			sin->sin_addr.s_addr = rs->rs_bound_addr_v4;
			uaddr_len = sizeof(*sin);
		} else {
			sin6 = (struct sockaddr_in6 *)uaddr;
			sin6->sin6_family = AF_INET6;
			sin6->sin6_port = rs->rs_bound_port;
			sin6->sin6_addr = rs->rs_bound_addr;
			sin6->sin6_flowinfo = 0;
			sin6->sin6_scope_id = rs->rs_bound_scope_id;
			uaddr_len = sizeof(*sin6);
		}
	}

	return uaddr_len;
}
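/*
 * A minimal userspace sketch (not part of this file) of the unbound-socket
 * behaviour described above: getsockname() on a not-yet-bound RDS socket
 * reports sa_family == AF_UNSPEC with an IPv4-sized address. rds_fd is
 * hypothetical.
 *
 *	struct sockaddr_storage ss;
 *	socklen_t slen = sizeof(ss);
 *
 *	if (getsockname(rds_fd, (struct sockaddr *)&ss, &slen) == 0 &&
 *	    ss.ss_family == AF_UNSPEC)
 *		;	// socket not bound yet
 */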
/*
 * RDS' poll is without a doubt the least intuitive part of the interface,
 * as EPOLLIN and EPOLLOUT do not behave entirely as you would expect from
 * a network protocol.
 *
 * EPOLLIN is asserted if
 *  -	there is data on the receive queue.
 *  -	to signal that a previously congested destination may have become
 *	uncongested
 *  -	A notification has been queued to the socket (this can be a congestion
 *	update, or a RDMA completion, or a MSG_ZEROCOPY completion).
 *
 * EPOLLOUT is asserted if there is room on the send queue. This does not mean
 * however, that the next sendmsg() call will succeed. If the application tries
 * to send to a congested destination, the system call may still fail (and
 * return ENOBUFS).
 */
static __poll_t rds_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	__poll_t mask = 0;
	unsigned long flags;

	poll_wait(file, sk_sleep(sk), wait);

	if (rs->rs_seen_congestion)
		poll_wait(file, &rds_poll_waitq, wait);

	read_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!rs->rs_cong_monitor) {
		/* When a congestion map was updated, we signal EPOLLIN for
		 * "historical" reasons. Applications can also poll for
		 * WRBAND instead. */
		if (rds_cong_updated_since(&rs->rs_cong_track))
			mask |= (EPOLLIN | EPOLLRDNORM | EPOLLWRBAND);
	} else {
		spin_lock(&rs->rs_lock);
		if (rs->rs_cong_notify)
			mask |= (EPOLLIN | EPOLLRDNORM);
		spin_unlock(&rs->rs_lock);
	}
	if (!list_empty(&rs->rs_recv_queue) ||
	    !list_empty(&rs->rs_notify_queue) ||
	    !list_empty(&rs->rs_zcookie_queue.zcookie_head))
		mask |= (EPOLLIN | EPOLLRDNORM);
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs))
		mask |= (EPOLLOUT | EPOLLWRNORM);
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR;
	read_unlock_irqrestore(&rs->rs_recv_lock, flags);

	/* clear state any time we wake a seen-congested socket */
	if (mask)
		rs->rs_seen_congestion = 0;

	return mask;
}
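/*
 * A minimal userspace sketch of the poll contract documented above; rds_fd
 * and the handle_recv()/handle_send() helpers are hypothetical. POLLIN may
 * mean a datagram, a notification, or a congestion update; POLLOUT only
 * means there is local sndbuf space, so sendmsg() can still fail with
 * ENOBUFS for a congested destination and must be retried.
 *
 *	struct pollfd pfd = { .fd = rds_fd, .events = POLLIN | POLLOUT };
 *
 *	for (;;) {
 *		if (poll(&pfd, 1, -1) < 0)
 *			break;
 *		if (pfd.revents & POLLIN)
 *			handle_recv(rds_fd);	// drain data + notifications
 *		if (pfd.revents & POLLOUT)
 *			handle_send(rds_fd);	// retry sends; expect ENOBUFS
 *	}
 */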
static int rds_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;
}
static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval,
			      int len)
{
	struct sockaddr_in6 sin6;
	struct sockaddr_in sin;
	int ret = 0;

	/* racing with another thread binding seems ok here */
	if (ipv6_addr_any(&rs->rs_bound_addr)) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (len < sizeof(struct sockaddr_in)) {
		ret = -EINVAL;
		goto out;
	} else if (len < sizeof(struct sockaddr_in6)) {
		/* Assume IPv4 */
		if (copy_from_user(&sin, optval, sizeof(struct sockaddr_in))) {
			ret = -EFAULT;
			goto out;
		}
		ipv6_addr_set_v4mapped(sin.sin_addr.s_addr, &sin6.sin6_addr);
		sin6.sin6_port = sin.sin_port;
	} else {
		if (copy_from_user(&sin6, optval,
				   sizeof(struct sockaddr_in6))) {
			ret = -EFAULT;
			goto out;
		}
	}

	rds_send_drop_to(rs, &sin6);
out:
	return ret;
}
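/*
 * Userspace sketch (hypothetical fd, address, and port): cancel everything
 * queued to one destination. The socket must already be bound, and the
 * option value is a sockaddr_in or sockaddr_in6 naming the destination;
 * SOL_RDS and RDS_CANCEL_SENT_TO come from the uapi <linux/rds.h>.
 *
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = inet_addr("192.0.2.1"),
 *		.sin_port = htons(4000),
 *	};
 *
 *	if (setsockopt(rds_fd, SOL_RDS, RDS_CANCEL_SENT_TO, &dst, sizeof(dst)))
 *		err(1, "RDS_CANCEL_SENT_TO");
 */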
static int rds_set_bool_option(unsigned char *optvar, char __user *optval,
			       int optlen)
{
	int value;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(value, (int __user *) optval))
		return -EFAULT;

	*optvar = !!value;
	return 0;
}
static int rds_cong_monitor(struct rds_sock *rs, char __user *optval,
			    int optlen)
{
	int ret;

	ret = rds_set_bool_option(&rs->rs_cong_monitor, optval, optlen);
	if (ret == 0) {
		if (rs->rs_cong_monitor) {
			rds_cong_add_socket(rs);
		} else {
			rds_cong_remove_socket(rs);
			rs->rs_cong_mask = 0;
			rs->rs_cong_notify = 0;
		}
	}
	return ret;
}
static int rds_set_transport(struct rds_sock *rs, char __user *optval,
			     int optlen)
{
	int t_type;

	if (rs->rs_transport)
		return -EOPNOTSUPP; /* previously attached to transport */

	if (optlen != sizeof(int))
		return -EINVAL;

	if (copy_from_user(&t_type, (int __user *)optval, sizeof(t_type)))
		return -EFAULT;

	if (t_type < 0 || t_type >= RDS_TRANS_COUNT)
		return -EINVAL;

	rs->rs_transport = rds_trans_get(t_type);

	return rs->rs_transport ? 0 : -ENOPROTOOPT;
}
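/*
 * Userspace sketch: pin the socket to one transport before bind(). Once a
 * transport is attached (explicitly here, or implicitly by bind()), a second
 * attempt fails with EOPNOTSUPP. SOL_RDS, SO_RDS_TRANSPORT, and the
 * RDS_TRANS_* values come from the uapi <linux/rds.h>; rds_fd is
 * hypothetical.
 *
 *	int t = RDS_TRANS_TCP;		// or RDS_TRANS_IB
 *
 *	if (setsockopt(rds_fd, SOL_RDS, SO_RDS_TRANSPORT, &t, sizeof(t)))
 *		err(1, "SO_RDS_TRANSPORT");
 */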
static int rds_enable_recvtstamp(struct sock *sk, char __user *optval,
				 int optlen)
{
	int val, valbool;

	if (optlen != sizeof(int))
		return -EFAULT;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	if (valbool)
		sock_set_flag(sk, SOCK_RCVTSTAMP);
	else
		sock_reset_flag(sk, SOCK_RCVTSTAMP);

	return 0;
}
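/*
 * Userspace sketch: note that RDS accepts SO_TIMESTAMP at level SOL_RDS (see
 * rds_setsockopt() below), after which recvmsg() attaches an SCM_TIMESTAMP
 * control message carrying a struct timeval. The cmsg walk is a fragment,
 * not a complete receive loop; rds_fd, msg, and tv are assumed declared.
 *
 *	int on = 1;
 *
 *	setsockopt(rds_fd, SOL_RDS, SO_TIMESTAMP, &on, sizeof(on));
 *	...
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_SOCKET &&
 *		    cmsg->cmsg_type == SCM_TIMESTAMP)
 *			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
 */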
static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval,
				  int optlen)
{
	struct rds_rx_trace_so trace;
	int i;

	if (optlen != sizeof(struct rds_rx_trace_so))
		return -EFAULT;

	if (copy_from_user(&trace, optval, sizeof(trace)))
		return -EFAULT;

	if (trace.rx_traces > RDS_MSG_RX_DGRAM_TRACE_MAX)
		return -EFAULT;

	rs->rs_rx_traces = trace.rx_traces;
	for (i = 0; i < rs->rs_rx_traces; i++) {
		if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) {
			rs->rs_rx_traces = 0;
			return -EFAULT;
		}
		rs->rs_rx_trace[i] = trace.rx_trace_pos[i];
	}
	return 0;
}
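/*
 * Userspace sketch: request one receive-path latency trace point per
 * datagram. struct rds_rx_trace_so and the RDS_MSG_RX_* trace points come
 * from the uapi <linux/rds.h>; the particular trace point chosen here is
 * only an example, and rds_fd is hypothetical.
 *
 *	struct rds_rx_trace_so trace = {
 *		.rx_traces = 1,
 *		.rx_trace_pos = { RDS_MSG_RX_DGRAM_DELIVERED },
 *	};
 *
 *	if (setsockopt(rds_fd, SOL_RDS, SO_RDS_MSG_RXPATH_LATENCY,
 *		       &trace, sizeof(trace)))
 *		err(1, "SO_RDS_MSG_RXPATH_LATENCY");
 */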
static int rds_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct rds_sock *rs = rds_sk_to_rs(sock->sk);
	int ret;

	if (level != SOL_RDS) {
		ret = -ENOPROTOOPT;
		goto out;
	}

	switch (optname) {
	case RDS_CANCEL_SENT_TO:
		ret = rds_cancel_sent_to(rs, optval, optlen);
		break;
	case RDS_GET_MR:
		ret = rds_get_mr(rs, optval, optlen);
		break;
	case RDS_GET_MR_FOR_DEST:
		ret = rds_get_mr_for_dest(rs, optval, optlen);
		break;
	case RDS_FREE_MR:
		ret = rds_free_mr(rs, optval, optlen);
		break;
	case RDS_RECVERR:
		ret = rds_set_bool_option(&rs->rs_recverr, optval, optlen);
		break;
	case RDS_CONG_MONITOR:
		ret = rds_cong_monitor(rs, optval, optlen);
		break;
	case SO_RDS_TRANSPORT:
		lock_sock(sock->sk);
		ret = rds_set_transport(rs, optval, optlen);
		release_sock(sock->sk);
		break;
	case SO_TIMESTAMP:
		lock_sock(sock->sk);
		ret = rds_enable_recvtstamp(sock->sk, optval, optlen);
		release_sock(sock->sk);
		break;
	case SO_RDS_MSG_RXPATH_LATENCY:
		ret = rds_recv_track_latency(rs, optval, optlen);
		break;
	default:
		ret = -ENOPROTOOPT;
	}
out:
	return ret;
}
static int rds_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct rds_sock *rs = rds_sk_to_rs(sock->sk);
	int ret = -ENOPROTOOPT, len;
	int trans;

	if (level != SOL_RDS)
		goto out;

	if (get_user(len, optlen)) {
		ret = -EFAULT;
		goto out;
	}

	switch (optname) {
	case RDS_INFO_FIRST ... RDS_INFO_LAST:
		ret = rds_info_getsockopt(sock, optname, optval,
					  optlen, len);
		break;

	case RDS_RECVERR:
		if (len < sizeof(int))
			ret = -EINVAL;
		else if (put_user(rs->rs_recverr, (int __user *) optval) ||
			 put_user(sizeof(int), optlen))
			ret = -EFAULT;
		else
			ret = 0;
		break;
	case SO_RDS_TRANSPORT:
		if (len < sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		trans = (rs->rs_transport ? rs->rs_transport->t_type :
			 RDS_TRANS_NONE); /* unbound */
		if (put_user(trans, (int __user *)optval) ||
		    put_user(sizeof(int), optlen))
			ret = -EFAULT;
		else
			ret = 0;
		break;
	default:
		break;
	}

out:
	return ret;
}
static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
		       int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_in *sin;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	int ret = 0;

	lock_sock(sk);

	switch (addr_len) {
	case sizeof(struct sockaddr_in):
		sin = (struct sockaddr_in *)uaddr;
		if (sin->sin_family != AF_INET) {
			ret = -EINVAL;
			break;
		}
		if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) {
			ret = -EDESTADDRREQ;
			break;
		}
		if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) ||
		    sin->sin_addr.s_addr == htonl(INADDR_BROADCAST)) {
			ret = -EINVAL;
			break;
		}
		ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &rs->rs_conn_addr);
		rs->rs_conn_port = sin->sin_port;
		break;

	case sizeof(struct sockaddr_in6):
		ret = -EPROTONOSUPPORT;
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);
	return ret;
}
static struct proto rds_proto = {
	.name = "RDS",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct rds_sock),
};
static const struct proto_ops rds_proto_ops = {
	.family = AF_RDS,
	.owner = THIS_MODULE,
	.release = rds_release,
	.bind = rds_bind,
	.connect = rds_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = rds_getname,
	.poll = rds_poll,
	.ioctl = rds_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = rds_setsockopt,
	.getsockopt = rds_getsockopt,
	.sendmsg = rds_sendmsg,
	.recvmsg = rds_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
static void rds_sock_destruct(struct sock *sk)
{
	struct rds_sock *rs = rds_sk_to_rs(sk);

	WARN_ON((&rs->rs_item != rs->rs_item.next ||
		 &rs->rs_item != rs->rs_item.prev));
}
static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
{
	struct rds_sock *rs;

	sock_init_data(sock, sk);
	sock->ops = &rds_proto_ops;
	sk->sk_protocol = protocol;
	sk->sk_destruct = rds_sock_destruct;

	rs = rds_sk_to_rs(sk);
	spin_lock_init(&rs->rs_lock);
	rwlock_init(&rs->rs_recv_lock);
	INIT_LIST_HEAD(&rs->rs_send_queue);
	INIT_LIST_HEAD(&rs->rs_recv_queue);
	INIT_LIST_HEAD(&rs->rs_notify_queue);
	INIT_LIST_HEAD(&rs->rs_cong_list);
	rds_message_zcopy_queue_init(&rs->rs_zcookie_queue);
	spin_lock_init(&rs->rs_rdma_lock);
	rs->rs_rdma_keys = RB_ROOT;
	rs->rs_rx_traces = 0;

	spin_lock_bh(&rds_sock_lock);
	list_add_tail(&rs->rs_item, &rds_sock_list);
	rds_sock_count++;
	spin_unlock_bh(&rds_sock_lock);

	return 0;
}
static int rds_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;

	if (sock->type != SOCK_SEQPACKET || protocol)
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(net, AF_RDS, GFP_ATOMIC, &rds_proto, kern);
	if (!sk)
		return -ENOMEM;

	return __rds_create(sock, sk, protocol);
}
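/*
 * Userspace sketch of socket creation and use (addresses, port, buffer, and
 * destination are placeholders). RDS sockets are SOCK_SEQPACKET with
 * protocol 0; bind() selects the local address peers will see, and datagrams
 * are then addressed per call with sendto()/sendmsg(), or a default
 * destination is set with connect().
 *
 *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *	struct sockaddr_in laddr = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = inet_addr("192.0.2.2"),
 *		.sin_port = htons(4000),
 *	};
 *
 *	if (fd < 0 || bind(fd, (struct sockaddr *)&laddr, sizeof(laddr)))
 *		err(1, "socket/bind");
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */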
void rds_sock_addref(struct rds_sock *rs)
{
	sock_hold(rds_rs_to_sk(rs));
}

void rds_sock_put(struct rds_sock *rs)
{
	sock_put(rds_rs_to_sk(rs));
}
static const struct net_proto_family rds_family_ops = {
	.family = AF_RDS,
	.create = rds_create,
	.owner = THIS_MODULE,
};
static void rds_sock_inc_info(struct socket *sock, unsigned int len,
			      struct rds_info_iterator *iter,
			      struct rds_info_lengths *lens)
{
	struct rds_sock *rs;
	struct rds_incoming *inc;
	unsigned int total = 0;

	len /= sizeof(struct rds_info_message);

	spin_lock_bh(&rds_sock_lock);

	list_for_each_entry(rs, &rds_sock_list, rs_item) {
		read_lock(&rs->rs_recv_lock);

		/* XXX too lazy to maintain counts.. */
		list_for_each_entry(inc, &rs->rs_recv_queue, i_item) {
			total++;
			if (total <= len)
				rds_inc_info_copy(inc, iter,
						  inc->i_saddr.s6_addr32[3],
						  rs->rs_bound_addr_v4,
						  1);
		}

		read_unlock(&rs->rs_recv_lock);
	}

	spin_unlock_bh(&rds_sock_lock);

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}
static void rds_sock_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	struct rds_info_socket sinfo;
	struct rds_sock *rs;

	len /= sizeof(struct rds_info_socket);

	spin_lock_bh(&rds_sock_lock);

	if (len < rds_sock_count)
		goto out;

	list_for_each_entry(rs, &rds_sock_list, rs_item) {
		sinfo.sndbuf = rds_sk_sndbuf(rs);
		sinfo.rcvbuf = rds_sk_rcvbuf(rs);
		sinfo.bound_addr = rs->rs_bound_addr_v4;
		sinfo.connected_addr = rs->rs_conn_addr_v4;
		sinfo.bound_port = rs->rs_bound_port;
		sinfo.connected_port = rs->rs_conn_port;
		sinfo.inum = sock_i_ino(rds_rs_to_sk(rs));

		rds_info_copy(iter, &sinfo, sizeof(sinfo));
	}

out:
	lens->nr = rds_sock_count;
	lens->each = sizeof(struct rds_info_socket);

	spin_unlock_bh(&rds_sock_lock);
}
static void rds_exit(void)
{
	sock_unregister(rds_family_ops.family);
	proto_unregister(&rds_proto);
	rds_conn_exit();
	rds_cong_exit();
	rds_sysctl_exit();
	rds_threads_exit();
	rds_stats_exit();
	rds_page_exit();
	rds_bind_lock_destroy();
	rds_info_deregister_func(RDS_INFO_SOCKETS, rds_sock_info);
	rds_info_deregister_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);
}
module_exit(rds_exit);
u32 rds_gen_num;

static int rds_init(void)
{
	int ret;

	net_get_random_once(&rds_gen_num, sizeof(rds_gen_num));

	ret = rds_bind_lock_init();
	if (ret)
		goto out;

	ret = rds_conn_init();
	if (ret)
		goto out_bind;

	ret = rds_threads_init();
	if (ret)
		goto out_conn;
	ret = rds_sysctl_init();
	if (ret)
		goto out_threads;
	ret = rds_stats_init();
	if (ret)
		goto out_sysctl;
	ret = proto_register(&rds_proto, 1);
	if (ret)
		goto out_stats;
	ret = sock_register(&rds_family_ops);
	if (ret)
		goto out_proto;

	rds_info_register_func(RDS_INFO_SOCKETS, rds_sock_info);
	rds_info_register_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);

	goto out;

out_proto:
	proto_unregister(&rds_proto);
out_stats:
	rds_stats_exit();
out_sysctl:
	rds_sysctl_exit();
out_threads:
	rds_threads_exit();
out_conn:
	rds_conn_exit();
	rds_cong_exit();
	rds_page_exit();
out_bind:
	rds_bind_lock_destroy();
out:
	return ret;
}
module_init(rds_init);
#define DRV_VERSION	"4.0"
#define DRV_RELDATE	"Feb 12, 2009"

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: Reliable Datagram Sockets"
		   " v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_NETPROTO(PF_RDS);