// SPDX-License-Identifier: GPL-2.0
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  Basic Transport Functions exploiting Infiniband API
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"

#define SMC_LGR_NUM_INCR	256
#define SMC_LGR_FREE_DELAY_SERV	(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT	(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
#define SMC_LGR_FREE_DELAY_FAST	(8 * HZ)

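/* For illustration, with HZ ticks per second the delays above work out to:
 * a server link group lingers for 600s after its last connection is gone,
 * a client link group for 610s, and the "fast" variant for 8s.
 */
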
static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing && !lgr->freefast) {
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
						SMC_LGR_FREE_DELAY_CLNT :
						SMC_LGR_FREE_DELAY_SERV);
	}
}

void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
{
	if (!lgr->freeing && !lgr->freefast) {
		lgr->freefast = 1;
		mod_delayed_work(system_wq, &lgr->free_work,
				 SMC_LGR_FREE_DELAY_FAST);
	}
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn	connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

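/* The tree built above is ordered by alert_token_local, so a token (e.g.
 * one carried in an inbound CDC message) resolves to its connection in
 * O(log n) via smc_lgr_find_conn().
 */
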
/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static void smc_lgr_register_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);

	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
}

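/* Note: the token search above tolerates counter wrap-around; a candidate
 * already in use, or the reserved value 0, simply makes the loop try the
 * next value from nexttoken.
 */
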
/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
}

/* Send delete link, either as client to request the initiation
 * of the DELETE LINK sequence from server; or as server to
 * initiate the delete processing. See smc_llc_rx_delete_link().
 */
static int smc_link_send_delete(struct smc_link *lnk)
{
	if (lnk->state == SMC_LNK_ACTIVE &&
	    !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, true)) {
		smc_llc_link_deleting(lnk);
		return 0;
	}
	return -ENOTCONN;
}

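/* Freeing a link group is two-staged: the lgr is unlinked and free_work is
 * (re)scheduled with one of the delays above; the worker below then does
 * the actual teardown, unless a connection registered again in between.
 */
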
static void smc_lgr_free(struct smc_link_group *lgr);

static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	struct smc_link *lnk;
	bool conns;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */

	lnk = &lgr->lnk[SMC_SINGLE_LINK];
	if (!lgr->is_smcd && !lgr->terminating) {
		/* try to send del link msg, on error free lgr immediately */
		if (lnk->state == SMC_LNK_ACTIVE &&
		    !smc_link_send_delete(lnk)) {
			/* reschedule in case we never receive a response */
			smc_lgr_schedule_free_work(lgr);
			spin_unlock_bh(lgr_lock);
			return;
		}
	}
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
		smc_llc_link_inactive(lnk);
	if (lgr->is_smcd)
		smc_ism_signal_shutdown(lgr);
	smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	smc_lgr_terminate(lgr);
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 rndvec[3];
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev, ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freefast = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	rwlock_init(&lgr->sndbufs_lock);
	rwlock_init(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		get_device(&ini->ism_dev->dev);
		lgr->peer_gid = ini->ism_gid;
		lgr->smcd = ini->ism_dev;
		lgr_list = &ini->ism_dev->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->peer_shutdown = 0;
	} else {
		/* SMC-R specific settings */
		get_device(&ini->ib_dev->ibdev->dev);
		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
		       SMC_SYSTEMID_LEN);

		lnk = &lgr->lnk[SMC_SINGLE_LINK];
		/* initialize link */
		lnk->state = SMC_LNK_ACTIVATING;
		lnk->link_id = SMC_SINGLE_LINK;
		lnk->smcibdev = ini->ib_dev;
		lnk->ibport = ini->ib_port;
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		lnk->path_mtu =
			ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
		if (!ini->ib_dev->initialized)
			smc_ib_setup_per_ibdev(ini->ib_dev);
		get_random_bytes(rndvec, sizeof(rndvec));
		lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
			(rndvec[2] << 16);
		rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
					  ini->vlan_id, lnk->gid,
					  &lnk->sgid_index);
		if (rc)
			goto free_lgr;
		rc = smc_llc_link_init(lnk);
		if (rc)
			goto free_lgr;
		rc = smc_wr_alloc_link_mem(lnk);
		if (rc)
			goto clear_llc_lnk;
		rc = smc_ib_create_protection_domain(lnk);
		if (rc)
			goto free_link_mem;
		rc = smc_ib_create_queue_pair(lnk);
		if (rc)
			goto dealloc_pd;
		rc = smc_wr_create_link(lnk);
		if (rc)
			goto destroy_qp;
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

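/* error unwinding: each label below undoes the steps that succeeded before
 * the failing call in the setup chain above, in reverse order
 */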
destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk);
free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}

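/* Note that smc_lgr_create() reports failures as SMC_CLC_DECL_* reason
 * codes; the CLC handshake layer propagates these to the peer as a decline
 * message instead of a plain errno.
 */
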
static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	if (conn->sndbuf_desc)
		conn->sndbuf_desc->used = 0;
	if (conn->rmb_desc) {
		if (!conn->rmb_desc->regerr) {
			if (!lgr->is_smcd && !list_empty(&lgr->list)) {
				/* unregister rmb with peer */
				smc_llc_do_delete_rkey(
						&lgr->lnk[SMC_SINGLE_LINK],
						conn->rmb_desc);
			}
			conn->rmb_desc->used = 0;
		} else {
			/* buf registration failed, reuse not possible */
			write_lock_bh(&lgr->rmbs_lock);
			list_del(&conn->rmb_desc->list);
			write_unlock_bh(&lgr->rmbs_lock);

			smc_buf_free(lgr, true, conn->rmb_desc);
		}
	}
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	if (lgr->is_smcd) {
		smc_ism_unset_conn(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
	}
	if (!list_empty(&lgr->list)) {
		smc_lgr_unregister_conn(conn);
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
}

static void smc_link_clear(struct smc_link *lnk)
{
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk);
	smc_ib_modify_qp_reset(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
}

static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

	if (is_rmb) {
		if (buf_desc->mr_rx[SMC_SINGLE_LINK])
			smc_ib_put_memory_region(
					buf_desc->mr_rx[SMC_SINGLE_LINK]);
		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
				    DMA_FROM_DEVICE);
	} else {
		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
				    DMA_TO_DEVICE);
	}
	sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);

	__free_pages(buf_desc->pages, buf_desc->order);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* remove a link group */
static void smc_lgr_free(struct smc_link_group *lgr)
{
	smc_lgr_free_bufs(lgr);
	if (lgr->is_smcd) {
		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
		put_device(&lgr->smcd->dev);
	} else {
		smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
		put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
	}
	kfree(lgr);
}

void smc_lgr_forget(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(&lgr->list))
		list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
}

static void smc_sk_wake_ups(struct smc_sock *smc)
{
	smc->sk.sk_write_space(&smc->sk);
	smc->sk.sk_data_ready(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}

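/* Waking all three queues makes blocked senders, receivers and pollers
 * revisit the socket state, so they notice the sk_err set below.
 */
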
/* kill a connection */
static void smc_conn_kill(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		smc_close_abort(conn);

	smc->sk.sk_err = ECONNABORTED;
	smc_sk_wake_ups(smc);
	if (conn->lgr->is_smcd)
		tasklet_kill(&conn->rx_tsklet);
	smc_lgr_unregister_conn(conn);
	smc_close_active_abort(smc);
}

/* terminate link group */
static void __smc_lgr_terminate(struct smc_link_group *lgr)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;

	if (lgr->terminating)
		return;	/* lgr already terminating */
	lgr->terminating = 1;
	if (!lgr->is_smcd)
		smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);

	/* kill remaining link group connections */
	read_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
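		/* conns_lock must be dropped per node: lock_sock() may sleep
		 * and smc_conn_kill() takes the write lock when it
		 * unregisters the connection
		 */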
		read_unlock_bh(&lgr->conns_lock);
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk); /* sock_put below */
		lock_sock(&smc->sk);
		smc_conn_kill(conn);
		release_sock(&smc->sk);
		sock_put(&smc->sk); /* sock_hold above */
		read_lock_bh(&lgr->conns_lock);
		node = rb_first(&lgr->conns_all);
	}
	read_unlock_bh(&lgr->conns_lock);
	if (!lgr->is_smcd)
		wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
	smc_lgr_schedule_free_work_fast(lgr);
}

/* unlink and terminate link group */
void smc_lgr_terminate(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->terminating) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
	list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
	__smc_lgr_terminate(lgr);
}

/* Called when IB port is terminated */
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
		if (!lgr->is_smcd &&
		    lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
			list_move(&lgr->list, &lgr_free_list);
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr);
	}
}

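/* For the SMC-D termination below, a peer_gid of zero acts as a wildcard
 * matching every peer, and vlan == VLAN_VID_MASK matches link groups on
 * any VLAN.
 */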
/* Called when SMC-D device is terminated or peer is lost */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	/* run common cleanup function and build free list */
	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
			if (peer_gid) /* peer triggered termination */
				lgr->peer_shutdown = 1;
			list_move(&lgr->list, &lgr_free_list);
		}
	}
	spin_unlock_bh(&dev->lgr_lock);

	/* cancel the regular free workers and actually free lgrs */
	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		schedule_work(&lgr->terminate_work);
	}
}

/* Determine vlan of internal TCP socket.
 * @ini: the determined vlan id is stored in ini->vlan_id
 */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct net_device *ndev;
	int i, nest_lvl, rc = 0;

	ini->vlan_id = 0;
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

	ndev = dst->dev;
	if (is_vlan_dev(ndev)) {
		ini->vlan_id = vlan_dev_vlan_id(ndev);
		goto out_rel;
	}

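	/* the CLC socket may run over a device stacked above the VLAN
	 * device (e.g. a bond); walk the lower devices to find a VLAN
	 */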
	rtnl_lock();
	nest_lvl = ndev->lower_level;
	for (i = 0; i < nest_lvl; i++) {
		struct list_head *lower = &ndev->adj_list.lower;

		if (list_empty(lower))
			break;
		lower = lower->next;
		ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
		if (is_vlan_dev(ndev)) {
			ini->vlan_id = vlan_dev_vlan_id(ndev);
			break;
		}
	}
	rtnl_unlock();

out_rel:
	dst_release(dst);
out:
	return rc;
}

static bool smcr_lgr_match(struct smc_link_group *lgr,
			   struct smc_clc_msg_local *lcl,
			   enum smc_lgr_role role, u32 clcqpn)
{
	return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
		       SMC_SYSTEMID_LEN) &&
		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
			SMC_GID_SIZE) &&
		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
			sizeof(lcl->mac)) &&
		lgr->role == role &&
		(lgr->role == SMC_SERV ||
		 lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
}

static bool smcd_lgr_match(struct smc_link_group *lgr,
			   struct smcd_dev *smcismdev, u64 peer_gid)
{
	return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
}

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_connection *conn = &smc->conn;
	struct list_head *lgr_list;
	struct smc_link_group *lgr;
	enum smc_lgr_role role;
	spinlock_t *lgr_lock;
	int rc = 0;

	lgr_list = ini->is_smcd ? &ini->ism_dev->lgr_list : &smc_lgr_list.list;
	lgr_lock = ini->is_smcd ? &ini->ism_dev->lgr_lock : &smc_lgr_list.lock;
	ini->cln_first_contact = SMC_FIRST_CONTACT;
	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	if (role == SMC_CLNT && ini->srv_first_contact)
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
	spin_lock_bh(lgr_lock);
	list_for_each_entry(lgr, lgr_list, list) {
		write_lock_bh(&lgr->conns_lock);
		if ((ini->is_smcd ?
		     smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
		    !lgr->sync_err &&
		    lgr->vlan_id == ini->vlan_id &&
		    (role == SMC_CLNT ||
		     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
			/* link group found */
			ini->cln_first_contact = SMC_REUSE_CONTACT;
			conn->lgr = lgr;
			smc_lgr_register_conn(conn); /* add smc conn to lgr */
			if (delayed_work_pending(&lgr->free_work))
				cancel_delayed_work(&lgr->free_work);
			write_unlock_bh(&lgr->conns_lock);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
	spin_unlock_bh(lgr_lock);

	if (role == SMC_CLNT && !ini->srv_first_contact &&
	    ini->cln_first_contact == SMC_FIRST_CONTACT) {
		/* Server reuses a link group, but Client wants to start
		 * a new one
		 * send out_of_sync decline, reason synchr. error
		 */
		return SMC_CLC_DECL_SYNCERR;
	}

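	/* reaching "create" with SMC_FIRST_CONTACT means no matching link
	 * group was found (or the peer requested a fresh one), so a new
	 * link group is built; SMC_REUSE_CONTACT means the connection was
	 * already registered with an existing one above
	 */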
create:
	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		rc = smc_lgr_create(smc, ini);
		if (rc)
			goto out;
		lgr = conn->lgr;
		write_lock_bh(&lgr->conns_lock);
		smc_lgr_register_conn(conn); /* add smc conn to lgr */
		write_unlock_bh(&lgr->conns_lock);
	}
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
	conn->urg_state = SMC_URG_READ;
	if (ini->is_smcd) {
		conn->rx_off = sizeof(struct smcd_cdc_msg);
		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
	}
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif

out:
	return rc;
}

/* convert the RMB size into the compressed notation - minimum 16K.
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static u8 smc_compress_bufsize(int size)
{
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		return 0;

	size = (size - 1) >> 14;
	compressed = ilog2(size) + 1;
	if (compressed >= SMC_RMBE_SIZES)
		compressed = SMC_RMBE_SIZES - 1;
	return compressed;
}

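/* Worked example of the round-up: a request of 64KB + 1 byte gives
 * size = 65536 >> 14 = 4 and compressed = ilog2(4) + 1 = 3, which
 * smc_uncompress_bufsize() maps back to 1 << (3 + 14) = 128KB; an exact
 * 64KB request yields 2 and maps back to exactly 64KB.
 */
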
/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
	u32 size;

	size = 0x00000001 << (((int)compressed) + 14);
	return (int)size;
}

/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
					     rwlock_t *lock,
					     struct list_head *buf_list)
{
	struct smc_buf_desc *buf_slot;

	read_lock_bh(lock);
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			read_unlock_bh(lock);
			return buf_slot;
		}
	}
	read_unlock_bh(lock);
	return NULL;
}

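/* Note: the read side of the lock suffices above because a free slot is
 * claimed atomically via cmpxchg(); the write side only guards list
 * insertion and removal.
 */
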
/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}

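/* e.g. a 64KB RMB yields min(6553, SOCK_MIN_SNDBUF / 2) bytes as limit */
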
static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	struct smc_link *lnk;
	int rc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	buf_desc->order = get_order(bufsize);
	buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
				      __GFP_NOMEMALLOC | __GFP_COMP |
				      __GFP_NORETRY | __GFP_ZERO,
				      buf_desc->order);
	if (!buf_desc->pages) {
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);
	}
	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);

	/* build the sg table from the pages */
	lnk = &lgr->lnk[SMC_SINGLE_LINK];
	rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
			    GFP_KERNEL);
	if (rc) {
		smc_buf_free(lgr, is_rmb, buf_desc);
		return ERR_PTR(rc);
	}
	sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
		   buf_desc->cpu_addr, bufsize);

	/* map sg table to DMA address */
	rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* SMC protocol depends on mapping to one DMA address only */
	if (rc != 1) {
		smc_buf_free(lgr, is_rmb, buf_desc);
		return ERR_PTR(-EAGAIN);
	}

	/* create a new memory region for the RMB */
	if (is_rmb) {
		rc = smc_ib_get_memory_region(lnk->roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					      buf_desc);
		if (rc) {
			smc_buf_free(lgr, is_rmb, buf_desc);
			return ERR_PTR(rc);
		}
	}

	buf_desc->len = bufsize;
	return buf_desc;
}

#define SMCD_DMBE_SIZES		7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */

static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
						bool is_dmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	int rc;

	if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
		return ERR_PTR(-EAGAIN);

	/* try to alloc a new DMB */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);
	if (is_dmb) {
		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
		if (rc) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
		/* CDC header stored in buf. So, pretend it was smaller */
		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
	} else {
		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
					     __GFP_NOWARN | __GFP_NORETRY |
					     __GFP_NOMEMALLOC);
		if (!buf_desc->cpu_addr) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->len = bufsize;
	}
	return buf_desc;
}

static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *buf_list;
	int bufsize, bufsize_short;
	int sk_buf_size;
	rwlock_t *lock;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_sndbuf / 2;

	for (bufsize_short = smc_compress_bufsize(sk_buf_size);
	     bufsize_short >= 0; bufsize_short--) {

		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_short];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_short];
		}
		bufsize = smc_uncompress_bufsize(bufsize_short);
		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
			continue;

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
		if (buf_desc) {
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}

		if (is_smcd)
			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
		else
			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);
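		/* -ENOMEM ends the search at once; any other error (e.g.
		 * -EAGAIN for a failed mapping) just retries the next
		 * smaller buffer size
		 */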
		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		if (IS_ERR(buf_desc))
			continue;

		buf_desc->used = 1;
		write_lock_bh(lock);
		list_add(&buf_desc->list, buf_list);
		write_unlock_bh(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return -ENOMEM;

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size_short = bufsize_short;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit =
			smc_rmb_wnd_update_limit(buf_desc->len);
		if (is_smcd)
			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
	} else {
		conn->sndbuf_desc = buf_desc;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}

void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
			       conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				  conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
			       conn->rmb_desc, DMA_FROM_DEVICE);
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				  conn->rmb_desc, DMA_FROM_DEVICE);
}

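/* The four helpers above bracket CPU accesses to the DMA-mapped buffers;
 * they return early for SMC-D because DMBs are not mapped via the IB layer
 * and need no explicit syncing.
 */
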
/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
	int rc;

	/* create send buffer */
	rc = __smc_buf_create(smc, is_smcd, false);
	if (rc)
		return rc;
	/* create rmb */
	rc = __smc_buf_create(smc, is_smcd, true);
	if (rc)
		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
	return rc;
}

static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
{
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
		    (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			/* already in list */
			return i;
		}
	}
	i = smc_rmb_reserve_rtoken_idx(lgr);
	if (i < 0)
		return i;
	lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
	lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
	return i;
}

/* delete an rtoken */
int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
{
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
			lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;

			clear_bit(i, lgr->rtokens_used_mask);
			return 0;
		}
	}
	return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_clc_msg_accept_confirm *clc)
{
	conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
					  clc->rmb_rkey);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	return 0;
}

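/* The rtoken_idx stored above selects the peer's rkey/address pair that
 * this connection later uses for RDMA writes into the peer's RMB.
 */
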
static void smc_core_going_away(void)
{
	struct smc_ib_device *smcibdev;
	struct smcd_dev *smcd;

	spin_lock(&smc_ib_devices.lock);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		int i;

		for (i = 0; i < SMC_MAX_PORTS; i++)
			set_bit(i, smcibdev->ports_going_away);
	}
	spin_unlock(&smc_ib_devices.lock);

	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		smcd->going_away = 1;
	}
	spin_unlock(&smcd_dev_list.lock);
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_freeing_list);
	struct smcd_dev *smcd;

	smc_core_going_away();

	spin_lock_bh(&smc_lgr_list.lock);
	list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
	spin_unlock_bh(&smc_lgr_list.lock);

	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list)
		list_splice_init(&smcd->lgr_list, &lgr_freeing_list);
	spin_unlock(&smcd_dev_list.lock);

	list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
		list_del_init(&lgr->list);
		if (!lgr->is_smcd) {
			struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

			if (lnk->state == SMC_LNK_ACTIVE)
				smc_llc_send_delete_link(lnk, SMC_LLC_REQ,
							 false);
			smc_llc_link_inactive(lnk);
		}
		cancel_delayed_work_sync(&lgr->free_work);
		if (lgr->is_smcd)
			smc_ism_signal_shutdown(lgr);
		smc_lgr_free(lgr); /* free link group */
	}
}