// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Basic Transport Functions exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"

#define SMC_LGR_NUM_INCR		256
#define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
#define SMC_LGR_FREE_DELAY_FAST		(8 * HZ)
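
/* The free delays above stagger link group cleanup: the server side waits
 * 600s after the last connection went away before freeing the link group,
 * the client side waits 10s longer so both sides do not free out of sync
 * (see smc_lgr_schedule_free_work() below), and the 8s "fast" delay is used
 * once a link group is already being torn down.
 */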

static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing && !lgr->freefast) {
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
				 SMC_LGR_FREE_DELAY_CLNT :
				 SMC_LGR_FREE_DELAY_SERV);
	}
}

void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
{
	if (!lgr->freeing && !lgr->freefast) {
		lgr->freefast = 1;
		mod_delayed_work(system_wq, &lgr->free_work,
				 SMC_LGR_FREE_DELAY_FAST);
	}
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn	connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}
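
/* Note: the lookup counterpart of the insert above, smc_lgr_find_conn()
 * (defined in smc_core.h), descends the rbtree with the same ordering rule -
 * left for tokens smaller than the current node's token, right otherwise -
 * which keeps insert and lookup consistent.
 */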

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static void smc_lgr_register_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);

	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
}

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
	conn->lgr = NULL;
}

/* Send delete link, either as client to request the initiation
 * of the DELETE LINK sequence from server; or as server to
 * initiate the delete processing. See smc_llc_rx_delete_link().
 */
static int smc_link_send_delete(struct smc_link *lnk)
{
	if (lnk->state == SMC_LNK_ACTIVE &&
	    !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, true)) {
		smc_llc_link_deleting(lnk);
		return 0;
	}
	return -ENOTCONN;
}

static void smc_lgr_free(struct smc_link_group *lgr);

static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	struct smc_link *lnk;
	bool conns;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */

	lnk = &lgr->lnk[SMC_SINGLE_LINK];
	if (!lgr->is_smcd && !lgr->terminating) {
		/* try to send del link msg, on error free lgr immediately */
		if (lnk->state == SMC_LNK_ACTIVE &&
		    !smc_link_send_delete(lnk)) {
			/* reschedule in case we never receive a response */
			smc_lgr_schedule_free_work(lgr);
			spin_unlock_bh(lgr_lock);
			return;
		}
	}
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
		smc_llc_link_inactive(lnk);
	if (lgr->is_smcd && !lgr->terminating)
		smc_ism_signal_shutdown(lgr);
	smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	smc_lgr_terminate(lgr, true);
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 rndvec[3];
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev, ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freefast = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	rwlock_init(&lgr->sndbufs_lock);
	rwlock_init(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		get_device(&ini->ism_dev->dev);
		lgr->peer_gid = ini->ism_gid;
		lgr->smcd = ini->ism_dev;
		lgr_list = &ini->ism_dev->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->peer_shutdown = 0;
		atomic_inc(&ini->ism_dev->lgr_cnt);
	} else {
		/* SMC-R specific settings */
		get_device(&ini->ib_dev->ibdev->dev);
		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
		       SMC_SYSTEMID_LEN);

		lnk = &lgr->lnk[SMC_SINGLE_LINK];
		/* initialize link */
		lnk->state = SMC_LNK_ACTIVATING;
		lnk->link_id = SMC_SINGLE_LINK;
		lnk->smcibdev = ini->ib_dev;
		lnk->ibport = ini->ib_port;
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		lnk->path_mtu =
			ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
		if (!ini->ib_dev->initialized)
			smc_ib_setup_per_ibdev(ini->ib_dev);
		get_random_bytes(rndvec, sizeof(rndvec));
		lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
				   (rndvec[2] << 16);
		rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
					  ini->vlan_id, lnk->gid,
					  &lnk->sgid_index);
		if (rc)
			goto free_lgr;
		rc = smc_llc_link_init(lnk);
		if (rc)
			goto free_lgr;
		rc = smc_wr_alloc_link_mem(lnk);
		if (rc)
			goto clear_llc_lnk;
		rc = smc_ib_create_protection_domain(lnk);
		if (rc)
			goto free_link_mem;
		rc = smc_ib_create_queue_pair(lnk);
		if (rc)
			goto dealloc_pd;
		rc = smc_wr_create_link(lnk);
		if (rc)
			goto destroy_qp;
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk);
free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}
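
/* The error labels above unwind in reverse order of setup: queue pair,
 * protection domain, link WR memory, LLC link, the lgr allocation itself
 * and finally the ISM vlan reference. Negative kernel error codes are
 * mapped to CLC decline reason codes so the caller can answer the
 * handshake.
 */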

static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	if (conn->sndbuf_desc)
		conn->sndbuf_desc->used = 0;
	if (conn->rmb_desc) {
		if (!conn->rmb_desc->regerr) {
			if (!lgr->is_smcd && !list_empty(&lgr->list)) {
				/* unregister rmb with peer */
				smc_llc_do_delete_rkey(
						&lgr->lnk[SMC_SINGLE_LINK],
						conn->rmb_desc);
			}
			conn->rmb_desc->used = 0;
		} else {
			/* buf registration failed, reuse not possible */
			write_lock_bh(&lgr->rmbs_lock);
			list_del(&conn->rmb_desc->list);
			write_unlock_bh(&lgr->rmbs_lock);

			smc_buf_free(lgr, true, conn->rmb_desc);
		}
	}
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	if (lgr->is_smcd) {
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
	}
	if (!list_empty(&lgr->list)) {
		smc_lgr_unregister_conn(conn);
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
}

static void smc_link_clear(struct smc_link *lnk)
{
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk);
	smc_ib_modify_qp_reset(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
}

static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

	if (is_rmb) {
		if (buf_desc->mr_rx[SMC_SINGLE_LINK])
			smc_ib_put_memory_region(
					buf_desc->mr_rx[SMC_SINGLE_LINK]);
		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
				    DMA_FROM_DEVICE);
	} else {
		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
				    DMA_TO_DEVICE);
	}
	sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);

	__free_pages(buf_desc->pages, buf_desc->order);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* remove a link group */
static void smc_lgr_free(struct smc_link_group *lgr)
{
	smc_lgr_free_bufs(lgr);
	if (lgr->is_smcd) {
		if (!lgr->terminating) {
			smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
			put_device(&lgr->smcd->dev);
		}
		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
			wake_up(&lgr->smcd->lgrs_deleted);
	} else {
		smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
		put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
	}
	kfree(lgr);
}

void smc_lgr_forget(struct smc_link_group *lgr)
{
	struct list_head *lgr_list;
	spinlock_t *lgr_lock;

	lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(&lgr->list))
		list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
}

static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		struct smc_buf_desc *buf_desc;

		list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
			buf_desc->len += sizeof(struct smcd_cdc_msg);
			smc_ism_unregister_dmb(lgr->smcd, buf_desc);
		}
	}
}

static void smc_sk_wake_ups(struct smc_sock *smc)
{
	smc->sk.sk_write_space(&smc->sk);
	smc->sk.sk_data_ready(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}

/* kill a connection */
static void smc_conn_kill(struct smc_connection *conn, bool soft)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		smc_close_abort(conn);
	conn->killed = 1;
	smc->sk.sk_err = ECONNABORTED;
	smc_sk_wake_ups(smc);
	if (conn->lgr->is_smcd) {
		smc_ism_unset_conn(conn);
		if (soft)
			tasklet_kill(&conn->rx_tsklet);
		else
			tasklet_unlock_wait(&conn->rx_tsklet);
	}
	smc_lgr_unregister_conn(conn);
	smc_close_active_abort(smc);
}

static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
	if (lgr->is_smcd) {
		smc_ism_signal_shutdown(lgr);
		smcd_unregister_all_dmbs(lgr);
		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
		put_device(&lgr->smcd->dev);
	} else {
		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

		wake_up(&lnk->wr_reg_wait);
	}
}

/* terminate link group */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;

	if (lgr->terminating)
		return;	/* lgr already terminating */
	if (soft)
		cancel_delayed_work_sync(&lgr->free_work);
	lgr->terminating = 1;
	if (!lgr->is_smcd)
		smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);

	/* kill remaining link group connections */
	read_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
		read_unlock_bh(&lgr->conns_lock);
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk); /* sock_put below */
		lock_sock(&smc->sk);
		smc_conn_kill(conn, soft);
		release_sock(&smc->sk);
		sock_put(&smc->sk); /* sock_hold above */
		read_lock_bh(&lgr->conns_lock);
		node = rb_first(&lgr->conns_all);
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_lgr_cleanup(lgr);
	if (soft)
		smc_lgr_schedule_free_work_fast(lgr);
	else
		smc_lgr_free(lgr);
}
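
/* A "soft" termination above lets the lgr free_work run after the fast
 * delay, while a hard termination (soft == false) frees the link group
 * synchronously once all its connections have been killed.
 */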

/* unlink and terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
void smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->terminating) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
	if (!soft)
		lgr->freeing = 1;
	list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
	__smc_lgr_terminate(lgr, soft);
}

/* Called when IB port is terminated */
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
		if (!lgr->is_smcd &&
		    lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
			list_move(&lgr->list, &lgr_free_list);
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, true);
	}
}

/* Called when peer lgr shutdown (regularly or abnormally) is received */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	/* run common cleanup function and build free list */
	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
			if (peer_gid) /* peer triggered termination */
				lgr->peer_shutdown = 1;
			list_move(&lgr->list, &lgr_free_list);
		}
	}
	spin_unlock_bh(&dev->lgr_lock);

	/* cancel the regular free workers and actually free lgrs */
	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		schedule_work(&lgr->terminate_work);
	}
}

/* Called when an SMCD device is removed or the smc module is unloaded */
void smc_smcd_terminate_all(struct smcd_dev *smcd)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smcd->lgr_lock);
	list_splice_init(&smcd->lgr_list, &lgr_free_list);
	list_for_each_entry(lgr, &lgr_free_list, list)
		lgr->freeing = 1;
	spin_unlock_bh(&smcd->lgr_lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (atomic_read(&smcd->lgr_cnt))
		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
}

/* Determine vlan of internal TCP socket.
 * @vlan_id: address to store the determined vlan id into
 */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct net_device *ndev;
	int i, nest_lvl, rc = 0;

	ini->vlan_id = 0;
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

	ndev = dst->dev;
	if (is_vlan_dev(ndev)) {
		ini->vlan_id = vlan_dev_vlan_id(ndev);
		goto out_rel;
	}

	rtnl_lock();
	nest_lvl = ndev->lower_level;
	for (i = 0; i < nest_lvl; i++) {
		struct list_head *lower = &ndev->adj_list.lower;

		if (list_empty(lower))
			break;
		lower = lower->next;
		ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
		if (is_vlan_dev(ndev)) {
			ini->vlan_id = vlan_dev_vlan_id(ndev);
			break;
		}
	}
	rtnl_unlock();

out_rel:
	dst_release(dst);
out:
	return rc;
}
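
/* Example: for a CLC socket whose route goes out via the VLAN device
 * eth0.100, ini->vlan_id ends up as 100; for a route via a plain device
 * the loop finds no vlan device in the lower-device chain and ini->vlan_id
 * stays 0.
 */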

static bool smcr_lgr_match(struct smc_link_group *lgr,
			   struct smc_clc_msg_local *lcl,
			   enum smc_lgr_role role, u32 clcqpn)
{
	return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
		       SMC_SYSTEMID_LEN) &&
		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
			SMC_GID_SIZE) &&
		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
			sizeof(lcl->mac)) &&
		lgr->role == role &&
		(lgr->role == SMC_SERV ||
		 lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
}

static bool smcd_lgr_match(struct smc_link_group *lgr,
			   struct smcd_dev *smcismdev, u64 peer_gid)
{
	return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
}

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_connection *conn = &smc->conn;
	struct list_head *lgr_list;
	struct smc_link_group *lgr;
	enum smc_lgr_role role;
	spinlock_t *lgr_lock;
	int rc = 0;

	lgr_list = ini->is_smcd ? &ini->ism_dev->lgr_list : &smc_lgr_list.list;
	lgr_lock = ini->is_smcd ? &ini->ism_dev->lgr_lock : &smc_lgr_list.lock;
	ini->cln_first_contact = SMC_FIRST_CONTACT;
	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	if (role == SMC_CLNT && ini->srv_first_contact)
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
	spin_lock_bh(lgr_lock);
	list_for_each_entry(lgr, lgr_list, list) {
		write_lock_bh(&lgr->conns_lock);
		if ((ini->is_smcd ?
		     smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
		    !lgr->sync_err &&
		    lgr->vlan_id == ini->vlan_id &&
		    (role == SMC_CLNT ||
		     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
			/* link group found */
			ini->cln_first_contact = SMC_REUSE_CONTACT;
			conn->lgr = lgr;
			smc_lgr_register_conn(conn); /* add smc conn to lgr */
			if (delayed_work_pending(&lgr->free_work))
				cancel_delayed_work(&lgr->free_work);
			write_unlock_bh(&lgr->conns_lock);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
	spin_unlock_bh(lgr_lock);

	if (role == SMC_CLNT && !ini->srv_first_contact &&
	    ini->cln_first_contact == SMC_FIRST_CONTACT) {
		/* Server reuses a link group, but Client wants to start
		 * a new one
		 * send out_of_sync decline, reason synchr. error
		 */
		return SMC_CLC_DECL_SYNCERR;
	}

create:
	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		rc = smc_lgr_create(smc, ini);
		if (rc)
			goto out;
		lgr = conn->lgr;
		write_lock_bh(&lgr->conns_lock);
		smc_lgr_register_conn(conn); /* add smc conn to lgr */
		write_unlock_bh(&lgr->conns_lock);
	}
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
	conn->urg_state = SMC_URG_READ;
	if (ini->is_smcd) {
		conn->rx_off = sizeof(struct smcd_cdc_msg);
		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
	}
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif

out:
	return rc;
}

/* convert the RMB size into the compressed notation - minimum 16K.
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static u8 smc_compress_bufsize(int size)
{
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		size = SMC_BUF_MIN_SIZE;

	size = (size - 1) >> 14;
	compressed = ilog2(size) + 1;
	if (compressed >= SMC_RMBE_SIZES)
		compressed = SMC_RMBE_SIZES - 1;
	return compressed;
}
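
/* Worked example: a request of 100000 bytes gives (100000 - 1) >> 14 = 6,
 * ilog2(6) + 1 = 3, i.e. compressed value 3, which smc_uncompress_bufsize()
 * turns back into 1 << (3 + 14) = 131072 bytes - the next RMB size of at
 * least the requested size.
 */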

/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
	u32 size;

	size = 0x00000001 << (((int)compressed) + 14);
	return (int)size;
}

/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
					     rwlock_t *lock,
					     struct list_head *buf_list)
{
	struct smc_buf_desc *buf_slot;

	read_lock_bh(lock);
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			read_unlock_bh(lock);
			return buf_slot;
		}
	}
	read_unlock_bh(lock);
	return NULL;
}

/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}
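
/* E.g. for a 16 KB RMB this limits window update announcements to
 * min(16384 / 10, SOCK_MIN_SNDBUF / 2) bytes of newly freed space, so
 * consumed buffer space is announced in chunks rather than byte by byte.
 */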

static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	struct smc_link *lnk;
	int rc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	buf_desc->order = get_order(bufsize);
	buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
				      __GFP_NOMEMALLOC | __GFP_COMP |
				      __GFP_NORETRY | __GFP_ZERO,
				      buf_desc->order);
	if (!buf_desc->pages) {
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);
	}
	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);

	/* build the sg table from the pages */
	lnk = &lgr->lnk[SMC_SINGLE_LINK];
	rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
			    GFP_KERNEL);
	if (rc) {
		smc_buf_free(lgr, is_rmb, buf_desc);
		return ERR_PTR(rc);
	}
	sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
		   buf_desc->cpu_addr, bufsize);

	/* map sg table to DMA address */
	rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* SMC protocol depends on mapping to one DMA address only */
	if (rc != 1) {
		smc_buf_free(lgr, is_rmb, buf_desc);
		return ERR_PTR(-EAGAIN); /* dma mapping failed */
	}

	/* create a new memory region for the RMB */
	if (is_rmb) {
		rc = smc_ib_get_memory_region(lnk->roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					      buf_desc);
		if (rc) {
			smc_buf_free(lgr, is_rmb, buf_desc);
			return ERR_PTR(rc);
		}
	}

	buf_desc->len = bufsize;
	return buf_desc;
}

#define SMCD_DMBE_SIZES		7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */

static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
						bool is_dmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	int rc;

	if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
		return ERR_PTR(-EAGAIN);

	/* try to alloc a new DMB */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);
	if (is_dmb) {
		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
		if (rc) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
		/* CDC header stored in buf. So, pretend it was smaller */
		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
	} else {
		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
					     __GFP_NOWARN | __GFP_NORETRY |
					     __GFP_NOMEMALLOC);
		if (!buf_desc->cpu_addr) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->len = bufsize;
	}
	return buf_desc;
}
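
/* Unlike the SMC-R case above, an SMC-D send buffer is plain kernel memory;
 * data is moved into the peer's DMB via the ISM device (smcd_ops->move_data),
 * so only the DMB side needs to be registered with the device.
 */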

static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *buf_list;
	int bufsize, bufsize_short;
	int sk_buf_size;
	rwlock_t *lock;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_sndbuf / 2;

	for (bufsize_short = smc_compress_bufsize(sk_buf_size);
	     bufsize_short >= 0; bufsize_short--) {

		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_short];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_short];
		}
		bufsize = smc_uncompress_bufsize(bufsize_short);
		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
			continue;

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
		if (buf_desc) {
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}

		if (is_smcd)
			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
		else
			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);

		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		if (IS_ERR(buf_desc))
			continue;

		buf_desc->used = 1;
		write_lock_bh(lock);
		list_add(&buf_desc->list, buf_list);
		write_unlock_bh(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return -ENOMEM;

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size_short = bufsize_short;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit =
			smc_rmb_wnd_update_limit(buf_desc->len);
		if (is_smcd)
			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
	} else {
		conn->sndbuf_desc = buf_desc;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}
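
/* The loop above starts at the compressed size matching half the socket
 * buffer and works downwards: e.g. with sk_rcvbuf = 256 KB it first tries a
 * 128 KB RMB, reusing an unused buffer of the link group when one exists,
 * and falls back to smaller sizes when allocation fails.
 */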

void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
			       conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				  conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
			       conn->rmb_desc, DMA_FROM_DEVICE);
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				  conn->rmb_desc, DMA_FROM_DEVICE);
}

/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
	int rc;

	/* create send buffer */
	rc = __smc_buf_create(smc, is_smcd, false);
	if (rc)
		return rc;
	/* create rmb */
	rc = __smc_buf_create(smc, is_smcd, true);
	if (rc)
		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
	return rc;
}

static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
{
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
		    (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			/* already in list */
			return i;
		}
	}
	i = smc_rmb_reserve_rtoken_idx(lgr);
	if (i < 0)
		return i;
	lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
	lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
	return i;
}
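
/* An rtoken pairs the peer's RMB rkey with its DMA address; smc_rtoken_add()
 * is called with the values from the peer's CLC accept/confirm message (see
 * smc_rmb_rtoken_handling() below), and the returned index is later used to
 * address the peer's RMB in RDMA writes.
 */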

/* delete an rtoken */
int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
{
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
			lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;

			clear_bit(i, lgr->rtokens_used_mask);
			return 0;
		}
	}
	return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_clc_msg_accept_confirm *clc)
{
	conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
					  clc->rmb_rkey);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	return 0;
}

static void smc_core_going_away(void)
{
	struct smc_ib_device *smcibdev;
	struct smcd_dev *smcd;

	spin_lock(&smc_ib_devices.lock);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		int i;

		for (i = 0; i < SMC_MAX_PORTS; i++)
			set_bit(i, smcibdev->ports_going_away);
	}
	spin_unlock(&smc_ib_devices.lock);

	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		smcd->going_away = 1;
	}
	spin_unlock(&smcd_dev_list.lock);
}

/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_freeing_list);
	struct smcd_dev *smcd;

	smc_core_going_away();

	spin_lock_bh(&smc_lgr_list.lock);
	list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
	spin_unlock_bh(&smc_lgr_list.lock);

	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list)
		smc_smcd_terminate_all(smcd);
	spin_unlock(&smcd_dev_list.lock);

	list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
		list_del_init(&lgr->list);
		if (!lgr->is_smcd) {
			struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

			if (lnk->state == SMC_LNK_ACTIVE)
				smc_llc_send_delete_link(lnk, SMC_LLC_REQ,
							 false);
			smc_llc_link_inactive(lnk);
		}
		cancel_delayed_work_sync(&lgr->free_work);
		smc_lgr_free(lgr); /* free link group */
	}
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
	smc_lgrs_shutdown();
}