// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
 * applies to SOCK_STREAM sockets only
 * offers an alternative communication option for TCP-protocol sockets
 * applicable with RoCE-cards only
 *
 * Initial restrictions:
 *   - support for alternate links postponed
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *             based on prototype from Frank Blaschka
 */

#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */

static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);

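/* sk_prot->keepalive callback: SMC has no keepalive of its own, so the
 * setting is simply forwarded to the internal CLC/TCP socket.
 */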
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}

static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};

int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);

void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);

struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);

static void smc_restore_fallback_changes(struct smc_sock *smc)
{
	smc->clcsock->file->private_data = smc->sk.sk_socket;
	smc->clcsock->file = NULL;
}

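/* Common release path: performs the active close (or shuts down the
 * internal CLC socket when falling back or still listening), unhashes the
 * sock and frees the SMC connection once the socket reached SMC_CLOSED.
 */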
static int __smc_release(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	} else {
		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
			sock_put(sk); /* passive closing */
		if (sk->sk_state == SMC_LISTEN) {
			/* wake up clcsock accept */
			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
		}
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk);
		smc_restore_fallback_changes(smc);
	}

	sk->sk_prot->unhash(sk);

	if (sk->sk_state == SMC_CLOSED) {
		if (smc->clcsock) {
			release_sock(sk);
			smc_clcsock_release(smc);
			lock_sock(sk);
		}
		if (!smc->use_fallback)
			smc_conn_free(&smc->conn);
	}

	return rc;
}

static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	smc = smc_sk(sk);

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);
	flush_work(&smc->connect_work);

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	rc = __smc_release(smc);

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);

	sock_put(sk); /* final sock_put */
out:
	return rc;
}

static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}

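/* Allocate and initialize an SMC sock: selects the v4 or v6 proto,
 * initializes the connect/listen work items, the accept queue and its
 * locks, and hashes the new sock.
 */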
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);
	mutex_init(&smc->clcsock_release_lock);

	return sk;
}

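/* bind replicates the inet_bind() sanity checks on the SMC socket and
 * then binds the internal CLC/TCP socket to the given address
 */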
static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}

static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}

#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED) | \
			     (1UL << SOCK_TSTAMP_NEW))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}

/* register a new rmb, send confirm_rkey msg to register with peer */
static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc,
		       bool conf_rkey)
{
	if (!rmb_desc->wr_reg) {
		/* register memory region for new rmb */
		if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
			rmb_desc->regerr = 1;
			return -EFAULT;
		}
		rmb_desc->wr_reg = 1;
	}
	if (!conf_rkey)
		return 0;
	/* exchange confirm_rkey msg with peer */
	if (smc_llc_do_confirm_rkey(link, rmb_desc)) {
		rmb_desc->regerr = 1;
		return -EFAULT;
	}
	return 0;
}

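/* Client side of the LLC handshake for the first link of a new link
 * group: wait for the server's CONFIRM LINK, bring the QP to RTS,
 * register the RMB, respond, and reject the server's ADD LINK request
 * (only a single link is supported so far).
 */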
static int smc_clnt_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	/* receive CONFIRM LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}

	if (link->llc_confirm_rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_ERR_RDYLNK;

	smc_wr_remember_qp_attr(link);

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive ADD LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
	}

	/* send add link reject message, only one link supported for now */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   link->gid, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_AL;

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}

static void smcr_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->rmbe_size);

	smc->conn.peer_rmbe_idx = clc->rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}

static void smcd_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->dmbe_size);

	smc->conn.peer_rmbe_idx = clc->dmbe_idx;
	smc->conn.peer_token = clc->token;
	/* msg header takes up space in the buffer */
	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
}

static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	if (smc->conn.lgr->is_smcd)
		smcd_conn_save_peer_info(smc, clc);
	else
		smcr_conn_save_peer_info(smc, clc);
}

static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->qpn);
	memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->psn);
	link->peer_mtu = clc->qp_mtu;
}

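/* Switch an SMC socket to TCP fallback mode. If the socket is already
 * attached to a file, redirect the file's private_data to the internal
 * CLC socket so that subsequent socket calls reach TCP directly.
 */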
static void smc_switch_to_fallback(struct smc_sock *smc)
{
	smc->use_fallback = true;
	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
		smc->clcsock->file = smc->sk.sk_socket->file;
		smc->clcsock->file->private_data = smc->clcsock;
	}
}

/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
	smc_switch_to_fallback(smc);
	smc->fallback_rsn = reason_code;
	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}

/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
{
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_PEERDECL) {
		rc = smc_clc_send_decline(smc, reason_code);
		if (rc < 0) {
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc, reason_code);
}

/* abort connecting */
static int smc_connect_abort(struct smc_sock *smc, int reason_code,
			     int local_contact)
{
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(smc->conn.lgr);
	if (smc->conn.lgr->is_smcd)
		/* there is only one lgr role for SMC-D; use server lock */
		mutex_unlock(&smc_server_lgr_pending);
	else
		mutex_unlock(&smc_client_lgr_pending);

	smc_conn_free(&smc->conn);
	smc->connect_nonblock = 0;
	return reason_code;
}

/* check if there is a rdma device available for this connection. */
/* called for connect and listen */
static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
	if (!ini->ib_dev)
		return SMC_CLC_DECL_NOSMCRDEV;
	return 0;
}

/* check if there is an ISM device available for this connection. */
/* called for connect and listen */
static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* Find ISM device with same PNETID as connecting interface */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
	if (!ini->ism_dev)
		return SMC_CLC_DECL_NOSMCDDEV;
	return 0;
}

/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
				      struct smc_init_info *ini)
{
	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev, ini->vlan_id))
		return SMC_CLC_DECL_ISMVLANERR;
	return 0;
}

/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
					struct smc_init_info *ini)
{
	if (!is_smcd)
		return 0;
	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev, ini->vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}

/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc, int smc_type,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, smc_type, ini);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT,
				CLC_WAIT_TIME);
}

/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_init_info *ini)
{
	struct smc_link *link;
	int reason_code = 0;

	ini->is_smcd = false;
	ini->ib_lcl = &aclc->lcl;
	ini->ib_clcqpn = ntoh24(aclc->qpn);
	ini->srv_first_contact = aclc->hdr.flag;

	mutex_lock(&smc_client_lgr_pending);
	reason_code = smc_conn_create(smc, ini);
	if (reason_code) {
		mutex_unlock(&smc_client_lgr_pending);
		return reason_code;
	}
	link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	smc_conn_save_peer_info(smc, aclc);

	/* create send buffer and rmb */
	if (smc_buf_create(smc, false))
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
					 ini->cln_first_contact);

	if (ini->cln_first_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, aclc);

	if (smc_rmb_rtoken_handling(&smc->conn, aclc))
		return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RTOK,
					 ini->cln_first_contact);

	smc_close_init(smc);
	smc_rx_init(smc);

	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		if (smc_ib_ready_link(link))
			return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK,
						 ini->cln_first_contact);
	} else {
		if (smc_reg_rmb(link, smc->conn.rmb_desc, true))
			return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB,
						 ini->cln_first_contact);
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	reason_code = smc_clc_send_confirm(smc);
	if (reason_code)
		return smc_connect_abort(smc, reason_code,
					 ini->cln_first_contact);

	smc_tx_init(smc);

	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		/* QP confirmation over RoCE fabric */
		reason_code = smc_clnt_conf_first_link(smc);
		if (reason_code)
			return smc_connect_abort(smc, reason_code,
						 ini->cln_first_contact);
	}
	mutex_unlock(&smc_client_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
}

/* setup for ISM connection of client */
static int smc_connect_ism(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	ini->is_smcd = true;
	ini->ism_gid = aclc->gid;
	ini->srv_first_contact = aclc->hdr.flag;

	/* there is only one lgr role for SMC-D; use server lock */
	mutex_lock(&smc_server_lgr_pending);
	rc = smc_conn_create(smc, ini);
	if (rc) {
		mutex_unlock(&smc_server_lgr_pending);
		return rc;
	}

	/* Create send and receive buffers */
	if (smc_buf_create(smc, true))
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
					 ini->cln_first_contact);

	smc_conn_save_peer_info(smc, aclc);
	smc_close_init(smc);
	smc_rx_init(smc);
	smc_tx_init(smc);

	rc = smc_clc_send_confirm(smc);
	if (rc)
		return smc_connect_abort(smc, rc, ini->cln_first_contact);
	mutex_unlock(&smc_server_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
}

/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
	bool ism_supported = false, rdma_supported = false;
	struct smc_clc_msg_accept_confirm aclc;
	struct smc_init_info ini = {0};
	int smc_type;
	int rc = 0;

	if (smc->use_fallback)
		return smc_connect_fallback(smc, smc->fallback_rsn);

	/* if peer has not signalled SMC-capability, fall back */
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(smc->clcsock, &ini))
		return smc_connect_decline_fallback(smc,
						    SMC_CLC_DECL_GETVLANERR);

	/* check if there is an ism device available */
	if (!smc_find_ism_device(smc, &ini) &&
	    !smc_connect_ism_vlan_setup(smc, &ini)) {
		/* ISM is supported for this connection */
		ism_supported = true;
		smc_type = SMC_TYPE_D;
	}

	/* check if there is a rdma device available */
	if (!smc_find_rdma_device(smc, &ini)) {
		/* RDMA is supported for this connection */
		rdma_supported = true;
		if (ism_supported)
			smc_type = SMC_TYPE_B; /* both */
		else
			smc_type = SMC_TYPE_R; /* only RDMA */
	}

	/* if neither ISM nor RDMA are supported, fallback */
	if (!rdma_supported && !ism_supported)
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_NOSMCDEV);

	/* perform CLC handshake */
	rc = smc_connect_clc(smc, smc_type, &aclc, &ini);
	if (rc) {
		smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
		return smc_connect_decline_fallback(smc, rc);
	}

	/* depending on previous steps, connect using rdma or ism */
	if (rdma_supported && aclc.hdr.path == SMC_TYPE_R)
		rc = smc_connect_rdma(smc, &aclc, &ini);
	else if (ism_supported && aclc.hdr.path == SMC_TYPE_D)
		rc = smc_connect_ism(smc, &aclc, &ini);
	else
		rc = SMC_CLC_DECL_MODEUNSUPP;
	if (rc) {
		smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
		return smc_connect_decline_fallback(smc, rc);
	}

	smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
	return 0;
}

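/* Worker for nonblocking connect: waits until the TCP three-way handshake
 * of the internal CLC socket completes, then runs the SMC handshake via
 * __smc_connect() and wakes up socket waiters.
 */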
static void smc_connect_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    connect_work);
	long timeo = smc->sk.sk_sndtimeo;
	int rc = 0;

	if (!timeo)
		timeo = MAX_SCHEDULE_TIMEOUT;
	lock_sock(smc->clcsock->sk);
	if (smc->clcsock->sk->sk_err) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
	} else if ((1 << smc->clcsock->sk->sk_state) &
					(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
		if ((rc == -EPIPE) &&
		    ((1 << smc->clcsock->sk->sk_state) &
		     (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
			rc = 0;
	}
	release_sock(smc->clcsock->sk);
	lock_sock(&smc->sk);
	if (rc != 0 || smc->sk.sk_err) {
		smc->sk.sk_state = SMC_CLOSED;
		if (rc == -EPIPE || rc == -EAGAIN)
			smc->sk.sk_err = EPIPE;
		else if (signal_pending(current))
			smc->sk.sk_err = -sock_intr_errno(timeo);
		sock_put(&smc->sk); /* passive closing */
		goto out;
	}

	rc = __smc_connect(smc);
	if (rc < 0)
		smc->sk.sk_err = -rc;

out:
	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
		if (smc->sk.sk_err) {
			smc->sk.sk_state_change(&smc->sk);
		} else { /* allow polling before and after fallback decision */
			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
			smc->sk.sk_write_space(&smc->sk);
		}
	}
	release_sock(&smc->sk);
}

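/* proto_ops ->connect(): connects the internal CLC/TCP socket first; the
 * SMC handshake then runs synchronously, or via smc_connect_work() for
 * O_NONBLOCK sockets.
 */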
static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	if (smc->connect_nonblock) {
		rc = -EALREADY;
		goto out;
	}
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc && rc != -EINPROGRESS)
		goto out;

	sock_hold(&smc->sk); /* sock put in passive closing */
	if (flags & O_NONBLOCK) {
		if (schedule_work(&smc->connect_work))
			smc->connect_nonblock = 1;
		rc = -EINPROGRESS;
	} else {
		rc = __smc_connect(smc);
		if (rc < 0)
			goto out;
		else
			rc = 0; /* success cases including fallback */
	}

out:
	release_sock(sk);
out_err:
	return rc;
}

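/* accept a connection on the internal CLC/TCP listen socket and allocate
 * the corresponding SMC sock for it
 */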
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc = -EINVAL;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	mutex_lock(&lsmc->clcsock_release_lock);
	if (lsmc->clcsock)
		rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
	mutex_unlock(&lsmc->clcsock_release_lock);
	lock_sock(lsk);
	if (rc < 0)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		new_sk->sk_prot->unhash(new_sk);
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}

/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink () */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}

/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			new_sk->sk_prot->unhash(new_sk);
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock) {
			sock_graft(new_sk, new_sock);
			if (isk->use_fallback) {
				smc_sk(new_sk)->clcsock->file = new_sock->file;
				isk->clcsock->file->private_data = isk->clcsock;
			}
		}
		return new_sk;
	}
	return NULL;
}

/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	__smc_release(smc);
	release_sock(sk);
	sock_put(sk); /* final sock_put */
}

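/* Server side of the LLC handshake for the first link of a new link
 * group: mirror image of smc_clnt_conf_first_link().
 */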
static int smc_serv_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm_resp,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}

	if (link->llc_confirm_resp_rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* send ADD LINK request to client over the RoCE fabric */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   link->gid, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_AL;

	/* receive ADD LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
	}

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}

/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

	if (lsmc->sk.sk_state == SMC_LISTEN) {
		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
		release_sock(&lsmc->sk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
}

/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	sk_refcnt_debug_inc(newsmcsk);
	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;

	smc_listen_out(new_smc);
}

/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;
	smc_conn_free(&new_smc->conn);

	smc_listen_out(new_smc);
}

/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_contact)
{
	/* RDMA setup failed, switch back to TCP */
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(new_smc->conn.lgr);
	if (reason_code < 0) { /* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	smc_conn_free(&new_smc->conn);
	smc_switch_to_fallback(new_smc);
	new_smc->fallback_rsn = reason_code;
	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
		if (smc_clc_send_decline(new_smc, reason_code) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}

/* listen worker: check prefixes */
static int smc_listen_prfx_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_DIFFPREFIX;

	return 0;
}

/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_init_info *ini)
{
	int rc;

	/* allocate connection / link group */
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc, false))
		return SMC_CLC_DECL_MEM;

	return 0;
}

/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
			       struct smc_clc_msg_proposal *pclc,
			       struct smc_init_info *ini)
{
	struct smc_clc_msg_smcd *pclc_smcd;
	int rc;

	pclc_smcd = smc_get_clc_msg_smcd(pclc);
	ini->ism_gid = pclc_smcd->gid;
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* Check if peer can be reached via ISM device */
	if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
			    new_smc->conn.lgr->vlan_id,
			    new_smc->conn.lgr->smcd)) {
		if (ini->cln_first_contact == SMC_FIRST_CONTACT)
			smc_lgr_forget(new_smc->conn.lgr);
		smc_conn_free(&new_smc->conn);
		return SMC_CLC_DECL_SMCDNOTALK;
	}

	/* Create send and receive buffers */
	if (smc_buf_create(new_smc, true)) {
		if (ini->cln_first_contact == SMC_FIRST_CONTACT)
			smc_lgr_forget(new_smc->conn.lgr);
		smc_conn_free(&new_smc->conn);
		return SMC_CLC_DECL_MEM;
	}

	return 0;
}

/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
{
	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	if (local_contact != SMC_FIRST_CONTACT) {
		if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
			return SMC_CLC_DECL_ERR_REGRMB;
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	return 0;
}

/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
				  struct smc_clc_msg_accept_confirm *cclc,
				  int local_contact)
{
	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
	int reason_code = 0;

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, cclc);

	if (smc_rmb_rtoken_handling(&new_smc->conn, cclc)) {
		reason_code = SMC_CLC_DECL_ERR_RTOK;
		goto decline;
	}

	if (local_contact == SMC_FIRST_CONTACT) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
			goto decline;
		}
		/* QP confirmation over RoCE fabric */
		reason_code = smc_serv_conf_first_link(new_smc);
		if (reason_code)
			goto decline;
	}
	return 0;

decline:
	smc_listen_decline(new_smc, reason_code, local_contact);
	return reason_code;
}

/* setup for RDMA connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_clc_msg_accept_confirm cclc;
	struct smc_clc_msg_proposal *pclc;
	struct smc_init_info ini = {0};
	bool ism_supported = false;
	u8 buf[SMC_CLC_MAX_LEN];
	int rc = 0;

	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
		return smc_listen_out_err(new_smc);

	if (new_smc->use_fallback) {
		smc_listen_out_connected(new_smc);
		return;
	}

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		smc_switch_to_fallback(new_smc);
		new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
		smc_listen_out_connected(new_smc);
		return;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	pclc = (struct smc_clc_msg_proposal *)&buf;
	rc = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
			      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
	if (rc)
		goto out_decl;

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(new_smc)) {
		rc = SMC_CLC_DECL_IPSEC;
		goto out_decl;
	}

	/* check for matching IP prefix and subnet length */
	rc = smc_listen_prfx_check(new_smc, pclc);
	if (rc)
		goto out_decl;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(new_smc->clcsock, &ini)) {
		rc = SMC_CLC_DECL_GETVLANERR;
		goto out_decl;
	}

	mutex_lock(&smc_server_lgr_pending);
	smc_close_init(new_smc);
	smc_rx_init(new_smc);
	smc_tx_init(new_smc);

	/* check if ISM is available */
	if (pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) {
		ini.is_smcd = true; /* prepare ISM check */
		rc = smc_find_ism_device(new_smc, &ini);
		if (!rc)
			rc = smc_listen_ism_init(new_smc, pclc, &ini);
		if (!rc)
			ism_supported = true;
		else if (pclc->hdr.path == SMC_TYPE_D)
			goto out_unlock; /* skip RDMA and decline */
	}

	/* check if RDMA is available */
	if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
		/* prepare RDMA check */
		ini.is_smcd = false;
		ini.ism_dev = NULL;
		ini.ib_lcl = &pclc->lcl;
		rc = smc_find_rdma_device(new_smc, &ini);
		if (rc) {
			/* no RDMA device found */
			if (pclc->hdr.path == SMC_TYPE_B)
				/* neither ISM nor RDMA device found */
				rc = SMC_CLC_DECL_NOSMCDEV;
			goto out_unlock;
		}
		rc = smc_listen_rdma_init(new_smc, &ini);
		if (rc)
			goto out_unlock;
		rc = smc_listen_rdma_reg(new_smc, ini.cln_first_contact);
		if (rc)
			goto out_unlock;
	}

	/* send SMC Accept CLC message */
	rc = smc_clc_send_accept(new_smc, ini.cln_first_contact);
	if (rc)
		goto out_unlock;

	/* SMC-D does not need this lock any more */
	if (ism_supported)
		mutex_unlock(&smc_server_lgr_pending);

	/* receive SMC Confirm CLC message */
	rc = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
			      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
	if (rc) {
		if (!ism_supported)
			goto out_unlock;
		goto out_decl;
	}

	/* finish worker */
	if (!ism_supported) {
		rc = smc_listen_rdma_finish(new_smc, &cclc,
					    ini.cln_first_contact);
		mutex_unlock(&smc_server_lgr_pending);
		if (rc)
			return;
	}
	smc_conn_save_peer_info(new_smc, &cclc);
	smc_listen_out_connected(new_smc);
	return;

out_unlock:
	mutex_unlock(&smc_server_lgr_pending);
out_decl:
	smc_listen_decline(new_smc, rc, ini.cln_first_contact);
}

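/* Listen worker: accepts TCP connections on the internal CLC socket in a
 * loop and schedules smc_listen_work() for each new child socket.
 */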
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc)
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		new_smc->fallback_rsn = lsmc->fallback_rsn;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!schedule_work(&new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
}

static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
	    smc->connect_nonblock)
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	rc = kernel_listen(smc->clcsock, backlog);
	if (rc)
		goto out;
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;
	sock_hold(sk); /* sock_hold in tcp_listen_worker */
	if (!schedule_work(&smc->tcp_listen_work))
		sock_put(sk);

out:
	release_sock(sk);
	return rc;
}

static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
								MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}

static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}

static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_INIT))
		goto out;

	if (msg->msg_flags & MSG_FASTOPEN) {
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
			goto out;
		}
	}

	if (smc->use_fallback)
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	else
		rc = smc_tx_sendmsg(smc, msg, len);
out:
	release_sock(sk);
	return rc;
}

static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	} else {
		msg->msg_namelen = 0;
		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}

static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}

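/* poll delegates to the internal CLC socket as long as the connection
 * runs in fallback mode; otherwise the SMC connection state decides.
 */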
static __poll_t smc_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	__poll_t mask = 0;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		sk->sk_err = smc->clcsock->sk->sk_err;
	} else {
		if (sk->sk_state != SMC_CLOSED)
			sock_poll_wait(file, sock, wait);
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask |= smc_accept_poll(sk);
		} else if (smc->use_fallback) { /* as result of connect_work()*/
			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
							wait);
			sk->sk_err = smc->clcsock->sk->sk_err;
		} else {
			if ((sk->sk_state != SMC_INIT &&
			     atomic_read(&smc->conn.sndbuf_space)) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
			if (smc->conn.urg_state == SMC_URG_VALID)
				mask |= EPOLLPRI;
		}
	}

	return mask;
}

static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK)
			sk->sk_state = SMC_CLOSED;
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		rc = smc_close_active(smc);
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;
		/* nothing more to do because peer is not involved */
		break;
	}
	if (smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}

static int smc_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int val, rc;

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
					   optval, optlen);
	if (smc->clcsock->sk->sk_err) {
		sk->sk_err = smc->clcsock->sk->sk_err;
		sk->sk_error_report(sk);
	}

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);
	switch (optname) {
	case TCP_ULP:
	case TCP_FASTOPEN:
	case TCP_FASTOPEN_CONNECT:
	case TCP_FASTOPEN_KEY:
	case TCP_FASTOPEN_NO_COOKIE:
		/* option not supported by SMC */
		if (sk->sk_state == SMC_INIT) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			if (!smc->use_fallback)
				rc = -EINVAL;
		}
		break;
	case TCP_NODELAY:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (val && !smc->use_fallback)
				mod_delayed_work(system_wq, &smc->conn.tx_work,
						 0);
		}
		break;
	case TCP_CORK:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (!val && !smc->use_fallback)
				mod_delayed_work(system_wq, &smc->conn.tx_work,
						 0);
		}
		break;
	case TCP_DEFER_ACCEPT:
		smc->sockopt_defer_accept = val;
		break;
	default:
		break;
	}
	release_sock(sk);

	return rc;
}

static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	/* socket options apply to the CLC socket */
	return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}

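/* ioctl supports the TCP-style queue inquiries (SIOCINQ, SIOCOUTQ,
 * SIOCOUTQNSD, SIOCATMARK) on native SMC connections and otherwise
 * delegates to the internal CLC socket.
 */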
static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	union smc_host_cursor cons, urg;
	struct smc_connection *conn;
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	conn = &smc->conn;
	lock_sock(&smc->sk);
	if (smc->use_fallback) {
		if (!smc->clcsock) {
			release_sock(&smc->sk);
			return -EBADF;
		}
		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
		release_sock(&smc->sk);
		return answ;
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not send + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc->conn.sndbuf_desc->len -
					atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not send only) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc_tx_prepared_sends(&smc->conn);
		break;
	case SIOCATMARK:
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED) {
			answ = 0;
		} else {
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			smc_curs_copy(&urg, &conn->urg_curs, conn);
			answ = smc_curs_diff(conn->rmb_desc->len,
					     &cons, &urg) == 1;
		}
		break;
	default:
		release_sock(&smc->sk);
		return -ENOIOCTLCMD;
	}
	release_sock(&smc->sk);

	return put_user(answ, (int __user *)arg);
}

static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback)
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	else
		rc = sock_no_sendpage(sock, page, offset, size, flags);

out:
	return rc;
}

/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * completed.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}
out:
	release_sock(sk);

	return rc;
}

/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};

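/* create an SMC socket together with its internal CLC/TCP companion
 * socket used for the CLC handshake and for the TCP fallback path
 */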
static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;
	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}

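/* Usage sketch (userspace, not part of this file): an AF_SMC socket is
 * requested like a TCP socket, with the address family encoded in the
 * protocol argument, e.g.
 *
 *	int fd4 = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);	// IPv4
 *	int fd6 = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC6);	// IPv6
 *
 * bind()/listen()/connect()/accept() then behave as for TCP, served by
 * smc_sock_ops above.
 */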
static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

unsigned int smc_net_id;

static __net_init int smc_net_init(struct net *net)
{
	return smc_pnet_net_init(net);
}

static void __net_exit smc_net_exit(struct net *net)
{
	smc_pnet_net_exit(net);
}

static struct pernet_operations smc_net_ops = {
	.init = smc_net_init,
	.exit = smc_net_exit,
	.id   = &smc_net_id,
	.size = sizeof(struct smc_net),
};

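/* module init: register the pernet subsystem, PNET table, LLC and CDC
 * handlers, both protos, the socket family and the IB client, and finally
 * enable the tcp_have_smc static key used by TCP to negotiate SMC.
 */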
static int __init smc_init(void)
{
	int rc;

	rc = register_pernet_subsys(&smc_net_ops);
	if (rc)
		return rc;

	rc = smc_pnet_init();
	if (rc)
		goto out_pernet_subsys;

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_pnet:
	smc_pnet_exit();
out_pernet_subsys:
	unregister_pernet_subsys(&smc_net_ops);

	return rc;
}

static void __exit smc_exit(void)
{
	smc_core_exit();
	static_branch_disable(&tcp_have_smc);
	smc_ib_unregister_client();
	sock_unregister(PF_SMC);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
	unregister_pernet_subsys(&smc_net_ops);
}

module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);