2 * Shared Memory Communications over RDMA (SMC-R) and RoCE
4 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
5 * applies to SOCK_STREAM sockets only
6 * offers an alternative communication option for TCP-protocol sockets
7 * applicable with RoCE-cards only
9 * Initial restrictions:
10 * - support for alternate links postponed
12 * Copyright IBM Corp. 2016, 2018
14 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
15 * based on prototype from Frank Blaschka
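 *
 * Typical user-space usage (for reference): an SMC socket is created with
 * socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC) for IPv4 addressing or
 * socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC6) for IPv6; when the SMC
 * handshake cannot be completed, the socket transparently falls back to
 * plain TCP.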
18 #define KMSG_COMPONENT "smc"
19 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
21 #include <linux/module.h>
22 #include <linux/socket.h>
23 #include <linux/workqueue.h>
25 #include <linux/sched/signal.h>
26 #include <linux/if_vlan.h>
31 #include <asm/ioctls.h>
33 #include <net/net_namespace.h>
34 #include <net/netns/generic.h>
35 #include "smc_netns.h"
47 #include "smc_close.h"
49 static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group creation on server */
52 static DEFINE_MUTEX(smc_client_lgr_pending); /* serialize link group creation on client */
56 static void smc_tcp_listen_work(struct work_struct *);
57 static void smc_connect_work(struct work_struct *);
59 static void smc_set_keepalive(struct sock *sk, int val)
61 struct smc_sock *smc = smc_sk(sk);
63 smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
66 static struct smc_hashinfo smc_v4_hashinfo = {
67 .lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
70 static struct smc_hashinfo smc_v6_hashinfo = {
71 .lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
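/* insert an SMC socket into the protocol hash table; invoked via
 * sk->sk_prot->hash() from smc_sock_alloc()
 */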
74 int smc_hash_sk(struct sock *sk)
76 struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
77 struct hlist_head *head;
81 write_lock_bh(&h->lock);
82 sk_add_node(sk, head);
83 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
84 write_unlock_bh(&h->lock);
88 EXPORT_SYMBOL_GPL(smc_hash_sk);
90 void smc_unhash_sk(struct sock *sk)
92 struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
94 write_lock_bh(&h->lock);
95 if (sk_del_node_init(sk))
96 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
97 write_unlock_bh(&h->lock);
99 EXPORT_SYMBOL_GPL(smc_unhash_sk);
101 struct proto smc_proto = {
103 .owner = THIS_MODULE,
104 .keepalive = smc_set_keepalive,
106 .unhash = smc_unhash_sk,
107 .obj_size = sizeof(struct smc_sock),
108 .h.smc_hash = &smc_v4_hashinfo,
109 .slab_flags = SLAB_TYPESAFE_BY_RCU,
111 EXPORT_SYMBOL_GPL(smc_proto);
113 struct proto smc_proto6 = {
115 .owner = THIS_MODULE,
116 .keepalive = smc_set_keepalive,
118 .unhash = smc_unhash_sk,
119 .obj_size = sizeof(struct smc_sock),
120 .h.smc_hash = &smc_v6_hashinfo,
121 .slab_flags = SLAB_TYPESAFE_BY_RCU,
123 EXPORT_SYMBOL_GPL(smc_proto6);
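/* release an SMC socket: terminate the SMC connection if one exists,
 * shut down or release the internal CLC/TCP socket and drop the final
 * socket reference
 */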
125 static int smc_release(struct socket *sock)
127 struct sock *sk = sock->sk;
128 struct smc_sock *smc;
136 /* cleanup for a dangling non-blocking connect */
137 if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
138 tcp_abort(smc->clcsock->sk, ECONNABORTED);
139 flush_work(&smc->connect_work);
141 if (sk->sk_state == SMC_LISTEN)
142 /* smc_close_non_accepted() is called and acquires
143 * sock lock for child sockets again
145 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
149 if (!smc->use_fallback) {
150 rc = smc_close_active(smc);
151 sock_set_flag(sk, SOCK_DEAD);
152 sk->sk_shutdown |= SHUTDOWN_MASK;
154 if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
155 sock_put(sk); /* passive closing */
156 if (sk->sk_state == SMC_LISTEN) {
157 /* wake up clcsock accept */
158 rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
160 sk->sk_state = SMC_CLOSED;
161 sk->sk_state_change(sk);
164 sk->sk_prot->unhash(sk);
166 if (sk->sk_state == SMC_CLOSED) {
168 mutex_lock(&smc->clcsock_release_lock);
169 sock_release(smc->clcsock);
171 mutex_unlock(&smc->clcsock_release_lock);
173 if (!smc->use_fallback)
174 smc_conn_free(&smc->conn);
182 sock_put(sk); /* final sock_put */
187 static void smc_destruct(struct sock *sk)
189 if (sk->sk_state != SMC_CLOSED)
191 if (!sock_flag(sk, SOCK_DEAD))
194 sk_refcnt_debug_dec(sk);
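/* allocate and initialize a new SMC socket: select the IPv4 or IPv6 proto,
 * initialize work items, accept queue and locks, and hash the sock
 */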
197 static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
200 struct smc_sock *smc;
204 prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
205 sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
209 sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
210 sk->sk_state = SMC_INIT;
211 sk->sk_destruct = smc_destruct;
212 sk->sk_protocol = protocol;
214 INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
215 INIT_WORK(&smc->connect_work, smc_connect_work);
216 INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
217 INIT_LIST_HEAD(&smc->accept_q);
218 spin_lock_init(&smc->accept_q_lock);
219 spin_lock_init(&smc->conn.send_lock);
220 sk->sk_prot->hash(sk);
221 sk_refcnt_debug_inc(sk);
222 mutex_init(&smc->clcsock_release_lock);
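/* bind(): replicate the inet_bind() address family checks, then bind the
 * internal CLC/TCP socket to the requested address
 */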
227 static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
230 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
231 struct sock *sk = sock->sk;
232 struct smc_sock *smc;
237 /* replicate tests from inet_bind(), to be safe wrt. future changes */
239 if (addr_len < sizeof(struct sockaddr_in))
243 if (addr->sin_family != AF_INET &&
244 addr->sin_family != AF_INET6 &&
245 addr->sin_family != AF_UNSPEC)
247 /* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
248 if (addr->sin_family == AF_UNSPEC &&
249 addr->sin_addr.s_addr != htonl(INADDR_ANY))
254 /* Check if socket is already active */
256 if (sk->sk_state != SMC_INIT)
259 smc->clcsock->sk->sk_reuse = sk->sk_reuse;
260 rc = kernel_bind(smc->clcsock, uaddr, addr_len);
268 static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
271 /* options we don't get control of via setsockopt */
272 nsk->sk_type = osk->sk_type;
273 nsk->sk_sndbuf = osk->sk_sndbuf;
274 nsk->sk_rcvbuf = osk->sk_rcvbuf;
275 nsk->sk_sndtimeo = osk->sk_sndtimeo;
276 nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
277 nsk->sk_mark = osk->sk_mark;
278 nsk->sk_priority = osk->sk_priority;
279 nsk->sk_rcvlowat = osk->sk_rcvlowat;
280 nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
281 nsk->sk_err = osk->sk_err;
283 nsk->sk_flags &= ~mask;
284 nsk->sk_flags |= osk->sk_flags & mask;
287 #define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
288 (1UL << SOCK_KEEPOPEN) | \
289 (1UL << SOCK_LINGER) | \
290 (1UL << SOCK_BROADCAST) | \
291 (1UL << SOCK_TIMESTAMP) | \
292 (1UL << SOCK_DBG) | \
293 (1UL << SOCK_RCVTSTAMP) | \
294 (1UL << SOCK_RCVTSTAMPNS) | \
295 (1UL << SOCK_LOCALROUTE) | \
296 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
297 (1UL << SOCK_RXQ_OVFL) | \
298 (1UL << SOCK_WIFI_STATUS) | \
299 (1UL << SOCK_NOFCS) | \
300 (1UL << SOCK_FILTER_LOCKED) | \
301 (1UL << SOCK_TSTAMP_NEW))
302 /* copy only relevant settings and flags of SOL_SOCKET level from smc to
303 * clc socket (since smc is not called for these options from net/core)
305 static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
307 smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
310 #define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
311 (1UL << SOCK_KEEPOPEN) | \
312 (1UL << SOCK_LINGER) | \
314 /* copy only settings and flags relevant for smc from clc to smc socket */
315 static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
317 smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
320 /* register a new rmb, send confirm_rkey msg to register with peer */
321 static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc,
324 if (!rmb_desc->wr_reg) {
325 /* register memory region for new rmb */
326 if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
327 rmb_desc->regerr = 1;
330 rmb_desc->wr_reg = 1;
334 /* exchange confirm_rkey msg with peer */
335 if (smc_llc_do_confirm_rkey(link, rmb_desc)) {
336 rmb_desc->regerr = 1;
342 static int smc_clnt_conf_first_link(struct smc_sock *smc)
344 struct net *net = sock_net(smc->clcsock->sk);
345 struct smc_link_group *lgr = smc->conn.lgr;
346 struct smc_link *link;
350 link = &lgr->lnk[SMC_SINGLE_LINK];
351 /* receive CONFIRM LINK request from server over RoCE fabric */
352 rest = wait_for_completion_interruptible_timeout(
354 SMC_LLC_WAIT_FIRST_TIME);
356 struct smc_clc_msg_decline dclc;
358 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
359 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
360 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
363 if (link->llc_confirm_rc)
364 return SMC_CLC_DECL_RMBE_EC;
366 rc = smc_ib_modify_qp_rts(link);
368 return SMC_CLC_DECL_ERR_RDYLNK;
370 smc_wr_remember_qp_attr(link);
372 if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
373 return SMC_CLC_DECL_ERR_REGRMB;
375 /* send CONFIRM LINK response over RoCE fabric */
376 rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
378 return SMC_CLC_DECL_TIMEOUT_CL;
380 /* receive ADD LINK request from server over RoCE fabric */
381 rest = wait_for_completion_interruptible_timeout(&link->llc_add,
384 struct smc_clc_msg_decline dclc;
386 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
387 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
388 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
391 /* send add link reject message, only one link supported for now */
392 rc = smc_llc_send_add_link(link,
393 link->smcibdev->mac[link->ibport - 1],
394 link->gid, SMC_LLC_RESP);
396 return SMC_CLC_DECL_TIMEOUT_AL;
398 smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);
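/* save SMC-R peer parameters (RMBE index, alert token and RMBE size) from
 * the CLC accept/confirm message
 */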
403 static void smcr_conn_save_peer_info(struct smc_sock *smc,
404 struct smc_clc_msg_accept_confirm *clc)
406 int bufsize = smc_uncompress_bufsize(clc->rmbe_size);
408 smc->conn.peer_rmbe_idx = clc->rmbe_idx;
409 smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
410 smc->conn.peer_rmbe_size = bufsize;
411 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
412 smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
415 static void smcd_conn_save_peer_info(struct smc_sock *smc,
416 struct smc_clc_msg_accept_confirm *clc)
418 int bufsize = smc_uncompress_bufsize(clc->dmbe_size);
420 smc->conn.peer_rmbe_idx = clc->dmbe_idx;
421 smc->conn.peer_token = clc->token;
422 /* msg header takes up space in the buffer */
423 smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
424 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
425 smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
428 static void smc_conn_save_peer_info(struct smc_sock *smc,
429 struct smc_clc_msg_accept_confirm *clc)
431 if (smc->conn.lgr->is_smcd)
432 smcd_conn_save_peer_info(smc, clc);
434 smcr_conn_save_peer_info(smc, clc);
437 static void smc_link_save_peer_info(struct smc_link *link,
438 struct smc_clc_msg_accept_confirm *clc)
440 link->peer_qpn = ntoh24(clc->qpn);
441 memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
442 memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
443 link->peer_psn = ntoh24(clc->psn);
444 link->peer_mtu = clc->qp_mtu;
447 /* fall back during connect */
448 static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
450 smc->use_fallback = true;
451 smc->fallback_rsn = reason_code;
452 smc_copy_sock_settings_to_clc(smc);
453 smc->connect_nonblock = 0;
454 if (smc->sk.sk_state == SMC_INIT)
455 smc->sk.sk_state = SMC_ACTIVE;
459 /* decline and fall back during connect */
460 static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
464 if (reason_code < 0) { /* error, fallback is not possible */
465 if (smc->sk.sk_state == SMC_INIT)
466 sock_put(&smc->sk); /* passive closing */
469 if (reason_code != SMC_CLC_DECL_PEERDECL) {
470 rc = smc_clc_send_decline(smc, reason_code);
472 if (smc->sk.sk_state == SMC_INIT)
473 sock_put(&smc->sk); /* passive closing */
477 return smc_connect_fallback(smc, reason_code);
480 /* abort connecting */
481 static int smc_connect_abort(struct smc_sock *smc, int reason_code,
484 if (local_contact == SMC_FIRST_CONTACT)
485 smc_lgr_forget(smc->conn.lgr);
486 if (smc->conn.lgr->is_smcd)
487 /* there is only one lgr role for SMC-D; use server lock */
488 mutex_unlock(&smc_server_lgr_pending);
490 mutex_unlock(&smc_client_lgr_pending);
492 smc_conn_free(&smc->conn);
493 smc->connect_nonblock = 0;
497 /* check if there is an RDMA device available for this connection. */
498 /* called for connect and listen */
499 static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
501 /* PNET table look up: search active ib_device and port
502 * within same PNETID that also contains the ethernet device
503 * used for the internal TCP socket
505 smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
507 return SMC_CLC_DECL_NOSMCRDEV;
511 /* check if there is an ISM device available for this connection. */
512 /* called for connect and listen */
513 static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
515 /* Find ISM device with same PNETID as connecting interface */
516 smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
518 return SMC_CLC_DECL_NOSMCDDEV;
522 /* Check for VLAN ID and register it on ISM device just for CLC handshake */
523 static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
524 struct smc_init_info *ini)
526 if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev, ini->vlan_id))
527 return SMC_CLC_DECL_ISMVLANERR;
531 /* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
532 * used, the VLAN ID will be registered again during the connection setup.
534 static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
535 struct smc_init_info *ini)
539 if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev, ini->vlan_id))
540 return SMC_CLC_DECL_CNFERR;
544 /* CLC handshake during connect */
545 static int smc_connect_clc(struct smc_sock *smc, int smc_type,
546 struct smc_clc_msg_accept_confirm *aclc,
547 struct smc_init_info *ini)
551 /* do inband token exchange */
552 rc = smc_clc_send_proposal(smc, smc_type, ini);
555 /* receive SMC Accept CLC message */
556 return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT,
560 /* setup for RDMA connection of client */
561 static int smc_connect_rdma(struct smc_sock *smc,
562 struct smc_clc_msg_accept_confirm *aclc,
563 struct smc_init_info *ini)
565 struct smc_link *link;
568 ini->is_smcd = false;
569 ini->ib_lcl = &aclc->lcl;
570 ini->ib_clcqpn = ntoh24(aclc->qpn);
571 ini->srv_first_contact = aclc->hdr.flag;
573 mutex_lock(&smc_client_lgr_pending);
574 reason_code = smc_conn_create(smc, ini);
576 mutex_unlock(&smc_client_lgr_pending);
579 link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];
581 smc_conn_save_peer_info(smc, aclc);
583 /* create send buffer and rmb */
584 if (smc_buf_create(smc, false))
585 return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
586 ini->cln_first_contact);
588 if (ini->cln_first_contact == SMC_FIRST_CONTACT)
589 smc_link_save_peer_info(link, aclc);
591 if (smc_rmb_rtoken_handling(&smc->conn, aclc))
592 return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RTOK,
593 ini->cln_first_contact);
598 if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
599 if (smc_ib_ready_link(link))
600 return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK,
601 ini->cln_first_contact);
603 if (smc_reg_rmb(link, smc->conn.rmb_desc, true))
604 return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB,
605 ini->cln_first_contact);
607 smc_rmb_sync_sg_for_device(&smc->conn);
609 reason_code = smc_clc_send_confirm(smc);
611 return smc_connect_abort(smc, reason_code,
612 ini->cln_first_contact);
616 if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
617 /* QP confirmation over RoCE fabric */
618 reason_code = smc_clnt_conf_first_link(smc);
620 return smc_connect_abort(smc, reason_code,
621 ini->cln_first_contact);
623 mutex_unlock(&smc_client_lgr_pending);
625 smc_copy_sock_settings_to_clc(smc);
626 smc->connect_nonblock = 0;
627 if (smc->sk.sk_state == SMC_INIT)
628 smc->sk.sk_state = SMC_ACTIVE;
633 /* setup for ISM connection of client */
634 static int smc_connect_ism(struct smc_sock *smc,
635 struct smc_clc_msg_accept_confirm *aclc,
636 struct smc_init_info *ini)
641 ini->ism_gid = aclc->gid;
642 ini->srv_first_contact = aclc->hdr.flag;
644 /* there is only one lgr role for SMC-D; use server lock */
645 mutex_lock(&smc_server_lgr_pending);
646 rc = smc_conn_create(smc, ini);
648 mutex_unlock(&smc_server_lgr_pending);
652 /* Create send and receive buffers */
653 if (smc_buf_create(smc, true))
654 return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
655 ini->cln_first_contact);
657 smc_conn_save_peer_info(smc, aclc);
662 rc = smc_clc_send_confirm(smc);
664 return smc_connect_abort(smc, rc, ini->cln_first_contact);
665 mutex_unlock(&smc_server_lgr_pending);
667 smc_copy_sock_settings_to_clc(smc);
668 smc->connect_nonblock = 0;
669 if (smc->sk.sk_state == SMC_INIT)
670 smc->sk.sk_state = SMC_ACTIVE;
675 /* perform steps before actually connecting */
676 static int __smc_connect(struct smc_sock *smc)
678 bool ism_supported = false, rdma_supported = false;
679 struct smc_clc_msg_accept_confirm aclc;
680 struct smc_init_info ini = {0};
684 sock_hold(&smc->sk); /* sock put in passive closing */
686 if (smc->use_fallback)
687 return smc_connect_fallback(smc, smc->fallback_rsn);
689 /* if peer has not signalled SMC-capability, fall back */
690 if (!tcp_sk(smc->clcsock->sk)->syn_smc)
691 return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
693 /* IPSec connections opt out of SMC-R optimizations */
694 if (using_ipsec(smc))
695 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);
697 /* get vlan id from IP device */
698 if (smc_vlan_by_tcpsk(smc->clcsock, &ini))
699 return smc_connect_decline_fallback(smc,
700 SMC_CLC_DECL_GETVLANERR);
702 /* check if there is an ISM device available */
703 if (!smc_find_ism_device(smc, &ini) &&
704 !smc_connect_ism_vlan_setup(smc, &ini)) {
705 /* ISM is supported for this connection */
706 ism_supported = true;
707 smc_type = SMC_TYPE_D;
710 /* check if there is an RDMA device available */
711 if (!smc_find_rdma_device(smc, &ini)) {
712 /* RDMA is supported for this connection */
713 rdma_supported = true;
715 smc_type = SMC_TYPE_B; /* both */
717 smc_type = SMC_TYPE_R; /* only RDMA */
720 /* if neither ISM nor RDMA is supported, fall back */
721 if (!rdma_supported && !ism_supported)
722 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_NOSMCDEV);
724 /* perform CLC handshake */
725 rc = smc_connect_clc(smc, smc_type, &aclc, &ini);
727 smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
728 return smc_connect_decline_fallback(smc, rc);
731 /* depending on previous steps, connect using rdma or ism */
732 if (rdma_supported && aclc.hdr.path == SMC_TYPE_R)
733 rc = smc_connect_rdma(smc, &aclc, &ini);
734 else if (ism_supported && aclc.hdr.path == SMC_TYPE_D)
735 rc = smc_connect_ism(smc, &aclc, &ini);
737 rc = SMC_CLC_DECL_MODEUNSUPP;
739 smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
740 return smc_connect_decline_fallback(smc, rc);
743 smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
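/* worker for a non-blocking connect(): wait for the TCP handshake on the
 * internal CLC socket to finish, then run the SMC handshake via
 * __smc_connect() and report the result on the SMC socket
 */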
747 static void smc_connect_work(struct work_struct *work)
749 struct smc_sock *smc = container_of(work, struct smc_sock,
751 long timeo = smc->sk.sk_sndtimeo;
755 timeo = MAX_SCHEDULE_TIMEOUT;
756 lock_sock(smc->clcsock->sk);
757 if (smc->clcsock->sk->sk_err) {
758 smc->sk.sk_err = smc->clcsock->sk->sk_err;
759 } else if ((1 << smc->clcsock->sk->sk_state) &
760 (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
761 rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
762 if ((rc == -EPIPE) &&
763 ((1 << smc->clcsock->sk->sk_state) &
764 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
767 release_sock(smc->clcsock->sk);
769 if (rc != 0 || smc->sk.sk_err) {
770 smc->sk.sk_state = SMC_CLOSED;
771 if (rc == -EPIPE || rc == -EAGAIN)
772 smc->sk.sk_err = EPIPE;
773 else if (signal_pending(current))
774 smc->sk.sk_err = -sock_intr_errno(timeo);
778 rc = __smc_connect(smc);
780 smc->sk.sk_err = -rc;
784 smc->sk.sk_state_change(&smc->sk);
786 smc->sk.sk_write_space(&smc->sk);
787 release_sock(&smc->sk);
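/* connect() handler: the TCP three-way handshake is done on the internal
 * CLC socket; the SMC handshake then runs inline for blocking sockets or
 * is deferred to smc_connect_work() for non-blocking ones
 */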
790 static int smc_connect(struct socket *sock, struct sockaddr *addr,
793 struct sock *sk = sock->sk;
794 struct smc_sock *smc;
799 /* separate smc parameter checking to be safe */
800 if (alen < sizeof(addr->sa_family))
802 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
806 switch (sk->sk_state) {
817 smc_copy_sock_settings_to_clc(smc);
818 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
819 if (smc->connect_nonblock) {
823 rc = kernel_connect(smc->clcsock, addr, alen, flags);
824 if (rc && rc != -EINPROGRESS)
826 if (flags & O_NONBLOCK) {
827 if (schedule_work(&smc->connect_work))
828 smc->connect_nonblock = 1;
831 rc = __smc_connect(smc);
835 rc = 0; /* success cases including fallback */
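/* accept a connection on the internal CLC/TCP listen socket and wrap it in
 * a newly allocated SMC socket
 */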
844 static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
846 struct socket *new_clcsock = NULL;
847 struct sock *lsk = &lsmc->sk;
852 new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
855 lsk->sk_err = ENOMEM;
860 *new_smc = smc_sk(new_sk);
862 mutex_lock(&lsmc->clcsock_release_lock);
864 rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
865 mutex_unlock(&lsmc->clcsock_release_lock);
869 if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
871 sock_release(new_clcsock);
872 new_sk->sk_state = SMC_CLOSED;
873 sock_set_flag(new_sk, SOCK_DEAD);
874 new_sk->sk_prot->unhash(new_sk);
875 sock_put(new_sk); /* final */
880 (*new_smc)->clcsock = new_clcsock;
885 /* add a just created sock to the accept queue of the listen sock as
886 * candidate for a following socket accept call from user space
888 static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
890 struct smc_sock *par = smc_sk(parent);
892 sock_hold(sk); /* sock_put in smc_accept_unlink() */
893 spin_lock(&par->accept_q_lock);
894 list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
895 spin_unlock(&par->accept_q_lock);
896 sk_acceptq_added(parent);
899 /* remove a socket from the accept queue of its parental listening socket */
900 static void smc_accept_unlink(struct sock *sk)
902 struct smc_sock *par = smc_sk(sk)->listen_smc;
904 spin_lock(&par->accept_q_lock);
905 list_del_init(&smc_sk(sk)->accept_q);
906 spin_unlock(&par->accept_q_lock);
907 sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
908 sock_put(sk); /* sock_hold in smc_accept_enqueue */
911 /* remove a sock from the accept queue to bind it to a new socket created
912 * for a socket accept call from user space
914 struct sock *smc_accept_dequeue(struct sock *parent,
915 struct socket *new_sock)
917 struct smc_sock *isk, *n;
920 list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
921 new_sk = (struct sock *)isk;
923 smc_accept_unlink(new_sk);
924 if (new_sk->sk_state == SMC_CLOSED) {
926 sock_release(isk->clcsock);
929 new_sk->sk_prot->unhash(new_sk);
930 sock_put(new_sk); /* final */
934 sock_graft(new_sk, new_sock);
940 /* clean up for a created but never accepted sock */
941 void smc_close_non_accepted(struct sock *sk)
943 struct smc_sock *smc = smc_sk(sk);
946 if (!sk->sk_lingertime)
947 /* wait for peer closing */
948 sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
949 if (!smc->use_fallback) {
950 smc_close_active(smc);
951 sock_set_flag(sk, SOCK_DEAD);
952 sk->sk_shutdown |= SHUTDOWN_MASK;
961 if (smc->use_fallback) {
962 sock_put(sk); /* passive closing */
963 sk->sk_state = SMC_CLOSED;
965 if (sk->sk_state == SMC_CLOSED)
966 smc_conn_free(&smc->conn);
969 sk->sk_prot->unhash(sk);
970 sock_put(sk); /* final sock_put */
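/* server side of the LLC handshake on the first link: register the RMB and
 * exchange CONFIRM LINK and ADD LINK messages with the client over the
 * RoCE fabric
 */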
973 static int smc_serv_conf_first_link(struct smc_sock *smc)
975 struct net *net = sock_net(smc->clcsock->sk);
976 struct smc_link_group *lgr = smc->conn.lgr;
977 struct smc_link *link;
981 link = &lgr->lnk[SMC_SINGLE_LINK];
983 if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
984 return SMC_CLC_DECL_ERR_REGRMB;
986 /* send CONFIRM LINK request to client over the RoCE fabric */
987 rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
989 return SMC_CLC_DECL_TIMEOUT_CL;
991 /* receive CONFIRM LINK response from client over the RoCE fabric */
992 rest = wait_for_completion_interruptible_timeout(
993 &link->llc_confirm_resp,
994 SMC_LLC_WAIT_FIRST_TIME);
996 struct smc_clc_msg_decline dclc;
998 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
999 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
1000 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
1003 if (link->llc_confirm_resp_rc)
1004 return SMC_CLC_DECL_RMBE_EC;
1006 /* send ADD LINK request to client over the RoCE fabric */
1007 rc = smc_llc_send_add_link(link,
1008 link->smcibdev->mac[link->ibport - 1],
1009 link->gid, SMC_LLC_REQ);
1011 return SMC_CLC_DECL_TIMEOUT_AL;
1013 /* receive ADD LINK response from client over the RoCE fabric */
1014 rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp,
1017 struct smc_clc_msg_decline dclc;
1019 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
1020 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
1021 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
1024 smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);
1029 /* listen worker: finish */
1030 static void smc_listen_out(struct smc_sock *new_smc)
1032 struct smc_sock *lsmc = new_smc->listen_smc;
1033 struct sock *newsmcsk = &new_smc->sk;
1035 lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
1036 if (lsmc->sk.sk_state == SMC_LISTEN) {
1037 smc_accept_enqueue(&lsmc->sk, newsmcsk);
1038 } else { /* no longer listening */
1039 smc_close_non_accepted(newsmcsk);
1041 release_sock(&lsmc->sk);
1043 /* Wake up accept */
1044 lsmc->sk.sk_data_ready(&lsmc->sk);
1045 sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
1048 /* listen worker: finish in state connected */
1049 static void smc_listen_out_connected(struct smc_sock *new_smc)
1051 struct sock *newsmcsk = &new_smc->sk;
1053 sk_refcnt_debug_inc(newsmcsk);
1054 if (newsmcsk->sk_state == SMC_INIT)
1055 newsmcsk->sk_state = SMC_ACTIVE;
1057 smc_listen_out(new_smc);
1060 /* listen worker: finish in error state */
1061 static void smc_listen_out_err(struct smc_sock *new_smc)
1063 struct sock *newsmcsk = &new_smc->sk;
1065 if (newsmcsk->sk_state == SMC_INIT)
1066 sock_put(&new_smc->sk); /* passive closing */
1067 newsmcsk->sk_state = SMC_CLOSED;
1068 smc_conn_free(&new_smc->conn);
1070 smc_listen_out(new_smc);
1073 /* listen worker: decline and fall back if possible */
1074 static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
1077 /* RDMA setup failed, switch back to TCP */
1078 if (local_contact == SMC_FIRST_CONTACT)
1079 smc_lgr_forget(new_smc->conn.lgr);
1080 if (reason_code < 0) { /* error, no fallback possible */
1081 smc_listen_out_err(new_smc);
1084 smc_conn_free(&new_smc->conn);
1085 new_smc->use_fallback = true;
1086 new_smc->fallback_rsn = reason_code;
1087 if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
1088 if (smc_clc_send_decline(new_smc, reason_code) < 0) {
1089 smc_listen_out_err(new_smc);
1093 smc_listen_out_connected(new_smc);
1096 /* listen worker: check prefixes */
1097 static int smc_listen_prfx_check(struct smc_sock *new_smc,
1098 struct smc_clc_msg_proposal *pclc)
1100 struct smc_clc_msg_proposal_prefix *pclc_prfx;
1101 struct socket *newclcsock = new_smc->clcsock;
1103 pclc_prfx = smc_clc_proposal_get_prefix(pclc);
1104 if (smc_clc_prfx_match(newclcsock, pclc_prfx))
1105 return SMC_CLC_DECL_DIFFPREFIX;
1110 /* listen worker: initialize connection and buffers */
1111 static int smc_listen_rdma_init(struct smc_sock *new_smc,
1112 struct smc_init_info *ini)
1116 /* allocate connection / link group */
1117 rc = smc_conn_create(new_smc, ini);
1121 /* create send buffer and rmb */
1122 if (smc_buf_create(new_smc, false))
1123 return SMC_CLC_DECL_MEM;
1128 /* listen worker: initialize connection and buffers for SMC-D */
1129 static int smc_listen_ism_init(struct smc_sock *new_smc,
1130 struct smc_clc_msg_proposal *pclc,
1131 struct smc_init_info *ini)
1133 struct smc_clc_msg_smcd *pclc_smcd;
1136 pclc_smcd = smc_get_clc_msg_smcd(pclc);
1137 ini->ism_gid = pclc_smcd->gid;
1138 rc = smc_conn_create(new_smc, ini);
1142 /* Check if peer can be reached via ISM device */
1143 if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
1144 new_smc->conn.lgr->vlan_id,
1145 new_smc->conn.lgr->smcd)) {
1146 if (ini->cln_first_contact == SMC_FIRST_CONTACT)
1147 smc_lgr_forget(new_smc->conn.lgr);
1148 smc_conn_free(&new_smc->conn);
1149 return SMC_CLC_DECL_SMCDNOTALK;
1152 /* Create send and receive buffers */
1153 if (smc_buf_create(new_smc, true)) {
1154 if (ini->cln_first_contact == SMC_FIRST_CONTACT)
1155 smc_lgr_forget(new_smc->conn.lgr);
1156 smc_conn_free(&new_smc->conn);
1157 return SMC_CLC_DECL_MEM;
1163 /* listen worker: register buffers */
1164 static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
1166 struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
1168 if (local_contact != SMC_FIRST_CONTACT) {
1169 if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
1170 return SMC_CLC_DECL_ERR_REGRMB;
1172 smc_rmb_sync_sg_for_device(&new_smc->conn);
1177 /* listen worker: finish RDMA setup */
1178 static int smc_listen_rdma_finish(struct smc_sock *new_smc,
1179 struct smc_clc_msg_accept_confirm *cclc,
1182 struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
1183 int reason_code = 0;
1185 if (local_contact == SMC_FIRST_CONTACT)
1186 smc_link_save_peer_info(link, cclc);
1188 if (smc_rmb_rtoken_handling(&new_smc->conn, cclc)) {
1189 reason_code = SMC_CLC_DECL_ERR_RTOK;
1193 if (local_contact == SMC_FIRST_CONTACT) {
1194 if (smc_ib_ready_link(link)) {
1195 reason_code = SMC_CLC_DECL_ERR_RDYLNK;
1198 /* QP confirmation over RoCE fabric */
1199 reason_code = smc_serv_conf_first_link(new_smc);
1206 smc_listen_decline(new_smc, reason_code, local_contact);
1210 /* listen worker: perform the CLC handshake and SMC-R or SMC-D setup for one incoming connection */
1211 static void smc_listen_work(struct work_struct *work)
1213 struct smc_sock *new_smc = container_of(work, struct smc_sock,
1215 struct socket *newclcsock = new_smc->clcsock;
1216 struct smc_clc_msg_accept_confirm cclc;
1217 struct smc_clc_msg_proposal *pclc;
1218 struct smc_init_info ini = {0};
1219 bool ism_supported = false;
1220 u8 buf[SMC_CLC_MAX_LEN];
1223 if (new_smc->use_fallback) {
1224 smc_listen_out_connected(new_smc);
1228 /* check if peer is smc capable */
1229 if (!tcp_sk(newclcsock->sk)->syn_smc) {
1230 new_smc->use_fallback = true;
1231 new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
1232 smc_listen_out_connected(new_smc);
1236 /* do inband token exchange -
1237 * wait for and receive SMC Proposal CLC message
1239 pclc = (struct smc_clc_msg_proposal *)&buf;
1240 rc = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
1241 SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
1245 /* IPSec connections opt out of SMC-R optimizations */
1246 if (using_ipsec(new_smc)) {
1247 rc = SMC_CLC_DECL_IPSEC;
1251 /* check for matching IP prefix and subnet length */
1252 rc = smc_listen_prfx_check(new_smc, pclc);
1256 /* get vlan id from IP device */
1257 if (smc_vlan_by_tcpsk(new_smc->clcsock, &ini)) {
1258 rc = SMC_CLC_DECL_GETVLANERR;
1262 mutex_lock(&smc_server_lgr_pending);
1263 smc_close_init(new_smc);
1264 smc_rx_init(new_smc);
1265 smc_tx_init(new_smc);
1267 /* check if ISM is available */
1268 if (pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) {
1269 ini.is_smcd = true; /* prepare ISM check */
1270 rc = smc_find_ism_device(new_smc, &ini);
1272 rc = smc_listen_ism_init(new_smc, pclc, &ini);
1274 ism_supported = true;
1275 else if (pclc->hdr.path == SMC_TYPE_D)
1276 goto out_unlock; /* skip RDMA and decline */
1279 /* check if RDMA is available */
1280 if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
1281 /* prepare RDMA check */
1282 memset(&ini, 0, sizeof(ini));
1283 ini.is_smcd = false;
1284 ini.ib_lcl = &pclc->lcl;
1285 rc = smc_find_rdma_device(new_smc, &ini);
1287 /* no RDMA device found */
1288 if (pclc->hdr.path == SMC_TYPE_B)
1289 /* neither ISM nor RDMA device found */
1290 rc = SMC_CLC_DECL_NOSMCDEV;
1293 rc = smc_listen_rdma_init(new_smc, &ini);
1296 rc = smc_listen_rdma_reg(new_smc, ini.cln_first_contact);
1301 /* send SMC Accept CLC message */
1302 rc = smc_clc_send_accept(new_smc, ini.cln_first_contact);
1306 /* SMC-D does not need this lock any more */
1308 mutex_unlock(&smc_server_lgr_pending);
1310 /* receive SMC Confirm CLC message */
1311 rc = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
1312 SMC_CLC_CONFIRM, CLC_WAIT_TIME);
1320 if (!ism_supported) {
1321 rc = smc_listen_rdma_finish(new_smc, &cclc,
1322 ini.cln_first_contact);
1323 mutex_unlock(&smc_server_lgr_pending);
1327 smc_conn_save_peer_info(new_smc, &cclc);
1328 smc_listen_out_connected(new_smc);
1332 mutex_unlock(&smc_server_lgr_pending);
1334 smc_listen_decline(new_smc, rc, ini.cln_first_contact);
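/* worker for a listening socket: accept incoming TCP connections on the
 * CLC socket and schedule smc_listen_work() for each new child socket
 */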
1337 static void smc_tcp_listen_work(struct work_struct *work)
1339 struct smc_sock *lsmc = container_of(work, struct smc_sock,
1341 struct sock *lsk = &lsmc->sk;
1342 struct smc_sock *new_smc;
1346 while (lsk->sk_state == SMC_LISTEN) {
1347 rc = smc_clcsock_accept(lsmc, &new_smc);
1353 new_smc->listen_smc = lsmc;
1354 new_smc->use_fallback = lsmc->use_fallback;
1355 new_smc->fallback_rsn = lsmc->fallback_rsn;
1356 sock_hold(lsk); /* sock_put in smc_listen_work */
1357 INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
1358 smc_copy_sock_settings_to_smc(new_smc);
1359 new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
1360 new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
1361 sock_hold(&new_smc->sk); /* sock_put in passive closing */
1362 if (!schedule_work(&new_smc->smc_listen_work))
1363 sock_put(&new_smc->sk);
1368 sock_put(&lsmc->sk); /* sock_hold in smc_listen */
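/* listen() handler: put the internal CLC/TCP socket into listen state and
 * schedule the TCP listen worker
 */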
1371 static int smc_listen(struct socket *sock, int backlog)
1373 struct sock *sk = sock->sk;
1374 struct smc_sock *smc;
1381 if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN))
1385 if (sk->sk_state == SMC_LISTEN) {
1386 sk->sk_max_ack_backlog = backlog;
1389 /* some socket options are handled in core, so we could not apply
1390 * them to the clc socket -- copy smc socket options to clc socket
1392 smc_copy_sock_settings_to_clc(smc);
1393 if (!smc->use_fallback)
1394 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
1396 rc = kernel_listen(smc->clcsock, backlog);
1399 sk->sk_max_ack_backlog = backlog;
1400 sk->sk_ack_backlog = 0;
1401 sk->sk_state = SMC_LISTEN;
1402 sock_hold(sk); /* sock_put in smc_tcp_listen_work() */
1403 if (!schedule_work(&smc->tcp_listen_work))
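/* accept() handler: wait for a child socket on the accept queue filled by
 * the listen worker; with TCP_DEFER_ACCEPT, additionally wait until data
 * has arrived on the new socket
 */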
1411 static int smc_accept(struct socket *sock, struct socket *new_sock,
1412 int flags, bool kern)
1414 struct sock *sk = sock->sk, *nsk;
1415 DECLARE_WAITQUEUE(wait, current);
1416 struct smc_sock *lsmc;
1421 sock_hold(sk); /* sock_put below */
1424 if (lsmc->sk.sk_state != SMC_LISTEN) {
1430 /* Wait for an incoming connection */
1431 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1432 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1433 while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
1434 set_current_state(TASK_INTERRUPTIBLE);
1440 timeo = schedule_timeout(timeo);
1441 /* wakeup by sk_data_ready in smc_listen_work() */
1442 sched_annotate_sleep();
1444 if (signal_pending(current)) {
1445 rc = sock_intr_errno(timeo);
1449 set_current_state(TASK_RUNNING);
1450 remove_wait_queue(sk_sleep(sk), &wait);
1453 rc = sock_error(nsk);
1458 if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
1459 /* wait till data arrives on the socket */
1460 timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
1462 if (smc_sk(nsk)->use_fallback) {
1463 struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
1466 if (skb_queue_empty(&clcsk->sk_receive_queue))
1467 sk_wait_data(clcsk, &timeo, NULL);
1468 release_sock(clcsk);
1469 } else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
1471 smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
1477 sock_put(sk); /* sock_hold above */
1481 static int smc_getname(struct socket *sock, struct sockaddr *addr,
1484 struct smc_sock *smc;
1486 if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
1487 (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
1490 smc = smc_sk(sock->sk);
1492 return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
1495 static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1497 struct sock *sk = sock->sk;
1498 struct smc_sock *smc;
1503 if ((sk->sk_state != SMC_ACTIVE) &&
1504 (sk->sk_state != SMC_APPCLOSEWAIT1) &&
1505 (sk->sk_state != SMC_INIT))
1508 if (msg->msg_flags & MSG_FASTOPEN) {
1509 if (sk->sk_state == SMC_INIT) {
1510 smc->use_fallback = true;
1511 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
1518 if (smc->use_fallback)
1519 rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
1521 rc = smc_tx_sendmsg(smc, msg, len);
1527 static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1530 struct sock *sk = sock->sk;
1531 struct smc_sock *smc;
1536 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
1537 /* socket was connected before, no more data to read */
1541 if ((sk->sk_state == SMC_INIT) ||
1542 (sk->sk_state == SMC_LISTEN) ||
1543 (sk->sk_state == SMC_CLOSED))
1546 if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
1551 if (smc->use_fallback) {
1552 rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
1554 msg->msg_namelen = 0;
1555 rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
1563 static __poll_t smc_accept_poll(struct sock *parent)
1565 struct smc_sock *isk = smc_sk(parent);
1568 spin_lock(&isk->accept_q_lock);
1569 if (!list_empty(&isk->accept_q))
1570 mask = EPOLLIN | EPOLLRDNORM;
1571 spin_unlock(&isk->accept_q_lock);
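/* poll() handler: delegate to the CLC socket in fallback mode, otherwise
 * derive the event mask from the SMC connection state and buffer space
 */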
1576 static __poll_t smc_poll(struct file *file, struct socket *sock,
1579 struct sock *sk = sock->sk;
1580 struct smc_sock *smc;
1586 smc = smc_sk(sock->sk);
1587 if (smc->use_fallback) {
1588 /* delegate to CLC child sock */
1589 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
1590 sk->sk_err = smc->clcsock->sk->sk_err;
1592 if (sk->sk_state != SMC_CLOSED)
1593 sock_poll_wait(file, sock, wait);
1596 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
1597 (sk->sk_state == SMC_CLOSED))
1599 if (sk->sk_state == SMC_LISTEN) {
1600 /* woken up by sk_data_ready in smc_listen_work() */
1601 mask |= smc_accept_poll(sk);
1602 } else if (smc->use_fallback) { /* as result of connect_work() */
1603 mask |= smc->clcsock->ops->poll(file, smc->clcsock,
1605 sk->sk_err = smc->clcsock->sk->sk_err;
1607 if ((sk->sk_state != SMC_INIT &&
1608 atomic_read(&smc->conn.sndbuf_space)) ||
1609 sk->sk_shutdown & SEND_SHUTDOWN) {
1610 mask |= EPOLLOUT | EPOLLWRNORM;
1612 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1613 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1615 if (atomic_read(&smc->conn.bytes_to_rcv))
1616 mask |= EPOLLIN | EPOLLRDNORM;
1617 if (sk->sk_shutdown & RCV_SHUTDOWN)
1618 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
1619 if (sk->sk_state == SMC_APPCLOSEWAIT1)
1621 if (smc->conn.urg_state == SMC_URG_VALID)
1629 static int smc_shutdown(struct socket *sock, int how)
1631 struct sock *sk = sock->sk;
1632 struct smc_sock *smc;
1638 if ((how < SHUT_RD) || (how > SHUT_RDWR))
1644 if ((sk->sk_state != SMC_ACTIVE) &&
1645 (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
1646 (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
1647 (sk->sk_state != SMC_APPCLOSEWAIT1) &&
1648 (sk->sk_state != SMC_APPCLOSEWAIT2) &&
1649 (sk->sk_state != SMC_APPFINCLOSEWAIT))
1651 if (smc->use_fallback) {
1652 rc = kernel_sock_shutdown(smc->clcsock, how);
1653 sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
1654 if (sk->sk_shutdown == SHUTDOWN_MASK)
1655 sk->sk_state = SMC_CLOSED;
1659 case SHUT_RDWR: /* shutdown in both directions */
1660 rc = smc_close_active(smc);
1663 rc = smc_close_shutdown_write(smc);
1667 /* nothing more to do because peer is not involved */
1671 rc1 = kernel_sock_shutdown(smc->clcsock, how);
1672 /* map sock_shutdown_cmd constants to sk_shutdown value range */
1673 sk->sk_shutdown |= how + 1;
1677 return rc ? rc : rc1;
1680 static int smc_setsockopt(struct socket *sock, int level, int optname,
1681 char __user *optval, unsigned int optlen)
1683 struct sock *sk = sock->sk;
1684 struct smc_sock *smc;
1689 /* generic setsockopts reaching us here always apply to the CLC socket */
1692 rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
1694 if (smc->clcsock->sk->sk_err) {
1695 sk->sk_err = smc->clcsock->sk->sk_err;
1696 sk->sk_error_report(sk);
1701 if (optlen < sizeof(int))
1703 if (get_user(val, (int __user *)optval))
1710 case TCP_FASTOPEN_CONNECT:
1711 case TCP_FASTOPEN_KEY:
1712 case TCP_FASTOPEN_NO_COOKIE:
1713 /* option not supported by SMC */
1714 if (sk->sk_state == SMC_INIT) {
1715 smc->use_fallback = true;
1716 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
1718 if (!smc->use_fallback)
1723 if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
1724 if (val && !smc->use_fallback)
1725 mod_delayed_work(system_wq, &smc->conn.tx_work,
1730 if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
1731 if (!val && !smc->use_fallback)
1732 mod_delayed_work(system_wq, &smc->conn.tx_work,
1736 case TCP_DEFER_ACCEPT:
1737 smc->sockopt_defer_accept = val;
1747 static int smc_getsockopt(struct socket *sock, int level, int optname,
1748 char __user *optval, int __user *optlen)
1750 struct smc_sock *smc;
1752 smc = smc_sk(sock->sk);
1753 /* socket options apply to the CLC socket */
1754 return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
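/* ioctl() handler: report receive/send queue fill levels and urgent data
 * state for the SMC connection, or delegate to the CLC socket in fallback
 * mode
 */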
1758 static int smc_ioctl(struct socket *sock, unsigned int cmd,
1761 union smc_host_cursor cons, urg;
1762 struct smc_connection *conn;
1763 struct smc_sock *smc;
1766 smc = smc_sk(sock->sk);
1768 lock_sock(&smc->sk);
1769 if (smc->use_fallback) {
1770 if (!smc->clcsock) {
1771 release_sock(&smc->sk);
1774 answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
1775 release_sock(&smc->sk);
1779 case SIOCINQ: /* same as FIONREAD */
1780 if (smc->sk.sk_state == SMC_LISTEN) {
1781 release_sock(&smc->sk);
1784 if (smc->sk.sk_state == SMC_INIT ||
1785 smc->sk.sk_state == SMC_CLOSED)
1788 answ = atomic_read(&smc->conn.bytes_to_rcv);
1791 /* output queue size (not sent + not acked) */
1792 if (smc->sk.sk_state == SMC_LISTEN) {
1793 release_sock(&smc->sk);
1796 if (smc->sk.sk_state == SMC_INIT ||
1797 smc->sk.sk_state == SMC_CLOSED)
1800 answ = smc->conn.sndbuf_desc->len -
1801 atomic_read(&smc->conn.sndbuf_space);
1804 /* output queue size (not sent only) */
1805 if (smc->sk.sk_state == SMC_LISTEN) {
1806 release_sock(&smc->sk);
1809 if (smc->sk.sk_state == SMC_INIT ||
1810 smc->sk.sk_state == SMC_CLOSED)
1813 answ = smc_tx_prepared_sends(&smc->conn);
1816 if (smc->sk.sk_state == SMC_LISTEN) {
1817 release_sock(&smc->sk);
1820 if (smc->sk.sk_state == SMC_INIT ||
1821 smc->sk.sk_state == SMC_CLOSED) {
1824 smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
1825 smc_curs_copy(&urg, &conn->urg_curs, conn);
1826 answ = smc_curs_diff(conn->rmb_desc->len,
1831 release_sock(&smc->sk);
1832 return -ENOIOCTLCMD;
1834 release_sock(&smc->sk);
1836 return put_user(answ, (int __user *)arg);
1839 static ssize_t smc_sendpage(struct socket *sock, struct page *page,
1840 int offset, size_t size, int flags)
1842 struct sock *sk = sock->sk;
1843 struct smc_sock *smc;
1848 if (sk->sk_state != SMC_ACTIVE) {
1853 if (smc->use_fallback)
1854 rc = kernel_sendpage(smc->clcsock, page, offset,
1857 rc = sock_no_sendpage(sock, page, offset, size, flags);
1863 /* Map the affected portions of the rmbe into an spd, note the number of bytes
1864 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
1865 * updates till whenever a respective page has been fully processed.
1866 * Note that subsequent recv() calls have to wait till all splice() processing has completed. */
1869 static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
1870 struct pipe_inode_info *pipe, size_t len,
1873 struct sock *sk = sock->sk;
1874 struct smc_sock *smc;
1879 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
1880 /* socket was connected before, no more data to read */
1884 if (sk->sk_state == SMC_INIT ||
1885 sk->sk_state == SMC_LISTEN ||
1886 sk->sk_state == SMC_CLOSED)
1889 if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
1894 if (smc->use_fallback) {
1895 rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
1902 if (flags & SPLICE_F_NONBLOCK)
1903 flags = MSG_DONTWAIT;
1906 rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
1914 /* must look like tcp */
1915 static const struct proto_ops smc_sock_ops = {
1917 .owner = THIS_MODULE,
1918 .release = smc_release,
1920 .connect = smc_connect,
1921 .socketpair = sock_no_socketpair,
1922 .accept = smc_accept,
1923 .getname = smc_getname,
1926 .listen = smc_listen,
1927 .shutdown = smc_shutdown,
1928 .setsockopt = smc_setsockopt,
1929 .getsockopt = smc_getsockopt,
1930 .sendmsg = smc_sendmsg,
1931 .recvmsg = smc_recvmsg,
1932 .mmap = sock_no_mmap,
1933 .sendpage = smc_sendpage,
1934 .splice_read = smc_splice_read,
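/* create a new AF_SMC socket: allocate the SMC sock and the internal TCP
 * socket used for the CLC handshake and for fallback
 */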
1937 static int smc_create(struct net *net, struct socket *sock, int protocol,
1940 int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
1941 struct smc_sock *smc;
1945 rc = -ESOCKTNOSUPPORT;
1946 if (sock->type != SOCK_STREAM)
1949 rc = -EPROTONOSUPPORT;
1950 if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
1954 sock->ops = &smc_sock_ops;
1955 sk = smc_sock_alloc(net, sock, protocol);
1959 /* create internal TCP socket for CLC handshake and fallback */
1961 smc->use_fallback = false; /* assume RDMA capability first */
1962 smc->fallback_rsn = 0;
1963 rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
1966 sk_common_release(sk);
1969 smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
1970 smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
1976 static const struct net_proto_family smc_sock_family_ops = {
1978 .owner = THIS_MODULE,
1979 .create = smc_create,
1982 unsigned int smc_net_id;
1984 static __net_init int smc_net_init(struct net *net)
1986 return smc_pnet_net_init(net);
1989 static void __net_exit smc_net_exit(struct net *net)
1991 smc_pnet_net_exit(net);
1994 static struct pernet_operations smc_net_ops = {
1995 .init = smc_net_init,
1996 .exit = smc_net_exit,
1998 .size = sizeof(struct smc_net),
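/* module init: register pernet ops, the pnet table, LLC and CDC handlers,
 * both SMC protos and the socket family, and the IB client
 */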
2001 static int __init smc_init(void)
2005 rc = register_pernet_subsys(&smc_net_ops);
2009 rc = smc_pnet_init();
2013 rc = smc_llc_init();
2015 pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
2019 rc = smc_cdc_init();
2021 pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
2025 rc = proto_register(&smc_proto, 1);
2027 pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
2031 rc = proto_register(&smc_proto6, 1);
2033 pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
2037 rc = sock_register(&smc_sock_family_ops);
2039 pr_err("%s: sock_register fails with %d\n", __func__, rc);
2042 INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
2043 INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);
2045 rc = smc_ib_register_client();
2047 pr_err("%s: ib_register fails with %d\n", __func__, rc);
2051 static_branch_enable(&tcp_have_smc);
2055 sock_unregister(PF_SMC);
2057 proto_unregister(&smc_proto6);
2059 proto_unregister(&smc_proto);
2065 static void __exit smc_exit(void)
2068 static_branch_disable(&tcp_have_smc);
2069 smc_ib_unregister_client();
2070 sock_unregister(PF_SMC);
2071 proto_unregister(&smc_proto6);
2072 proto_unregister(&smc_proto);
2074 unregister_pernet_subsys(&smc_net_ops);
2077 module_init(smc_init);
2078 module_exit(smc_exit);
2080 MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
2081 MODULE_DESCRIPTION("smc socket address family");
2082 MODULE_LICENSE("GPL");
2083 MODULE_ALIAS_NETPROTO(PF_SMC);