/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds_single_path.h"
#include "ib_mr.h"

struct workqueue_struct *rds_ib_mr_wq;

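/* Find the rds_ib_device that advertises @ipaddr and take a reference
 * on it.  The device and ipaddr lists are walked under RCU; the caller
 * must drop the reference with rds_ib_dev_put() when done.
 */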
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_ipaddr *i_ipaddr;

        rcu_read_lock();
        list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
                list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                        if (i_ipaddr->ipaddr == ipaddr) {
                                refcount_inc(&rds_ibdev->refcount);
                                rcu_read_unlock();
                                return rds_ibdev;
                        }
                }
        }
        rcu_read_unlock();

        return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;

        i_ipaddr = kmalloc(sizeof(*i_ipaddr), GFP_KERNEL);
        if (!i_ipaddr)
                return -ENOMEM;

        i_ipaddr->ipaddr = ipaddr;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        return 0;
}

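/* Unlink @ipaddr from the device's list.  The entry is freed via
 * kfree_rcu() so that concurrent readers in rds_ib_get_device() never
 * see freed memory.
 */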
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;
        struct rds_ib_ipaddr *to_free = NULL;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                if (i_ipaddr->ipaddr == ipaddr) {
                        list_del_rcu(&i_ipaddr->list);
                        to_free = i_ipaddr;
                        break;
                }
        }
        spin_unlock_irq(&rds_ibdev->spinlock);

        if (to_free)
                kfree_rcu(to_free, rcu);
}

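/* Associate @ipaddr with @rds_ibdev, removing it from whichever device
 * claimed it before.  Only IPv4-mapped IPv6 addresses are expected here,
 * so s6_addr32[3] carries the IPv4 address.
 */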
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
                         struct in6_addr *ipaddr)
{
        struct rds_ib_device *rds_ibdev_old;

        rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
        if (!rds_ibdev_old)
                return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);

        if (rds_ibdev_old != rds_ibdev) {
                rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
                rds_ib_dev_put(rds_ibdev_old);
                return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
        }
        rds_ib_dev_put(rds_ibdev_old);

        return 0;
}

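/* Attach a connection to its device, taking it off the global
 * ib_nodev_conns list now that the underlying device is known.
 */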
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* conn was previously on the nodev_conns_list */
        spin_lock_irq(&ib_nodev_conns_lock);
        BUG_ON(list_empty(&ib_nodev_conns));
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);

        spin_lock(&rds_ibdev->spinlock);
        list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
        spin_unlock(&rds_ibdev->spinlock);
        spin_unlock_irq(&ib_nodev_conns_lock);

        ic->rds_ibdev = rds_ibdev;
        refcount_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* place conn on nodev_conns_list */
        spin_lock(&ib_nodev_conns_lock);

        spin_lock_irq(&rds_ibdev->spinlock);
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);
        spin_unlock_irq(&rds_ibdev->spinlock);

        list_add_tail(&ic->ib_node, &ib_nodev_conns);

        spin_unlock(&ib_nodev_conns_lock);

        ic->rds_ibdev = NULL;
        rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&ib_nodev_conns_lock);
        list_splice(&ib_nodev_conns, &tmp_list);
        spin_unlock_irq(&ib_nodev_conns_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
                rds_conn_destroy(ic->conn);
}

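/* Report the 1M pool's limits in the rds-info RDMA connection info. */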
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

        iinfo->rdma_mr_max = pool_1m->max_items;
        iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}

#if IS_ENABLED(CONFIG_IPV6)
void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
                         struct rds6_info_rdma_connection *iinfo6)
{
        struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

        iinfo6->rdma_mr_max = pool_1m->max_items;
        iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}
#endif

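/* Pop one MR off the pool's clean list, if available.  Clean MRs are
 * fully unmapped and can be handed straight to a new caller.
 */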
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        struct llist_node *ret;
        unsigned long flags;

        spin_lock_irqsave(&pool->clean_lock, flags);
        ret = llist_del_first(&pool->clean_list);
        spin_unlock_irqrestore(&pool->clean_lock, flags);
        if (ret) {
                ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
                if (pool->pool_type == RDS_IB_MR_8K_POOL)
                        rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
                else
                        rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
        }

        return ibmr;
}

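/* DMA-sync an MR's scatterlist around CPU or device access.  The
 * scatterlist is mapped DMA_BIDIRECTIONAL, hence the bidirectional sync
 * in both cases.
 */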
void rds_ib_sync_mr(void *trans_private, int direction)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
                                       ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
                                          ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

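/* Release the CPU-visible state of an MR: DMA-unmap its scatterlist,
 * then dirty and unpin every page it referenced.
 */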
void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        struct rds_ib_device *rds_ibdev = ibmr->device;

        if (ibmr->sg_dma_len) {
                ib_dma_unmap_sg(rds_ibdev->dev,
                                ibmr->sg, ibmr->sg_len,
                                DMA_BIDIRECTIONAL);
                ibmr->sg_dma_len = 0;
        }

        /* Release the s/g list */
        if (ibmr->sg_len) {
                unsigned int i;

                for (i = 0; i < ibmr->sg_len; ++i) {
                        struct page *page = sg_page(&ibmr->sg[i]);

                        /* FIXME we need a way to tell a r/w MR
                         * from a r/o MR */
                        WARN_ON(!page->mapping && irqs_disabled());
                        set_page_dirty(page);
                        put_page(page);
                }
                kfree(ibmr->sg);

                ibmr->sg = NULL;
                ibmr->sg_len = 0;
        }
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        unsigned int pinned = ibmr->sg_len;

        __rds_ib_teardown_mr(ibmr);
        if (pinned) {
                struct rds_ib_mr_pool *pool = ibmr->pool;

                atomic_sub(pinned, &pool->free_pinned);
        }
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
                                         struct list_head *list)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *node;
        struct llist_node *next;
        unsigned int count = 0;

        node = llist_del_all(llist);
        while (node) {
                next = node->next;
                ibmr = llist_entry(node, struct rds_ib_mr, llnode);
                list_add_tail(&ibmr->unmap_list, list);
                node = next;
                count++;
        }
        return count;
}

/*
 * this takes a list head of mrs and turns it into a chain of linked llist
 * nodes, returning the head and tail so the whole run can be spliced onto
 * the clean list with a single llist_add_batch() call.
 */
static void list_to_llist_nodes(struct list_head *list,
                                struct llist_node **nodes_head,
                                struct llist_node **nodes_tail)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *cur = NULL;
        struct llist_node **next = nodes_head;

        list_for_each_entry(ibmr, list, unmap_list) {
                cur = &ibmr->llnode;
                *next = cur;
                next = &cur->next;
        }
        *next = NULL;
        *nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
                         int free_all, struct rds_ib_mr **ibmr_ret)
{
        struct rds_ib_mr *ibmr;
        struct llist_node *clean_nodes;
        struct llist_node *clean_tail;
        LIST_HEAD(unmap_list);
        unsigned long unpinned = 0;
        unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

        if (pool->pool_type == RDS_IB_MR_8K_POOL)
                rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
        else
                rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

        if (ibmr_ret) {
                DEFINE_WAIT(wait);
                while (!mutex_trylock(&pool->flush_lock)) {
                        ibmr = rds_ib_reuse_mr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }

                        prepare_to_wait(&pool->flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (llist_empty(&pool->clean_list))
                                schedule();

                        ibmr = rds_ib_reuse_mr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }
                }
                finish_wait(&pool->flush_wait, &wait);
        } else
                mutex_lock(&pool->flush_lock);

        if (ibmr_ret) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr) {
                        *ibmr_ret = ibmr;
                        goto out;
                }
        }

        /* Get the list of all MRs to be dropped. Ordering matters -
         * we want to put drop_list ahead of free_list.
         */
        dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
        dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
        if (free_all) {
                unsigned long flags;

                spin_lock_irqsave(&pool->clean_lock, flags);
                llist_append_to_list(&pool->clean_list, &unmap_list);
                spin_unlock_irqrestore(&pool->clean_lock, flags);
        }

        free_goal = rds_ib_flush_goal(pool, free_all);

        if (list_empty(&unmap_list))
                goto out;

        if (pool->use_fastreg)
                rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
        else
                rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);

        if (!list_empty(&unmap_list)) {
                unsigned long flags;

                list_to_llist_nodes(&unmap_list, &clean_nodes, &clean_tail);
                if (ibmr_ret) {
                        *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
                        clean_nodes = clean_nodes->next;
                }
                /* more than one entry in llist nodes */
                if (clean_nodes) {
                        spin_lock_irqsave(&pool->clean_lock, flags);
                        llist_add_batch(clean_nodes, clean_tail,
                                        &pool->clean_list);
                        spin_unlock_irqrestore(&pool->clean_lock, flags);
                }
        }

        atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(dirty_to_clean, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

out:
        mutex_unlock(&pool->flush_lock);
        if (waitqueue_active(&pool->flush_wait))
                wake_up(&pool->flush_wait);
out_nolock:
        return 0;
}

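/* Get an MR for a new mapping: reuse a clean one if available; otherwise
 * reserve room in the pool so the caller may allocate a fresh MR,
 * flushing the pool and retrying a couple of times if it is full.
 */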
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        int iter = 0;

        while (1) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr)
                        return ibmr;

                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        if (pool->pool_type == RDS_IB_MR_8K_POOL)
                                rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
                        else
                                rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
                        break;
                }

                /* We do have some empty MRs. Flush them out. */
                if (pool->pool_type == RDS_IB_MR_8K_POOL)
                        rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
                else
                        rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

                rds_ib_flush_mr_pool(pool, 0, &ibmr);
                if (ibmr)
                        return ibmr;
        }

        return ibmr;
}

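/* Delayed-work handler; scheduled on rds_ib_mr_wq whenever too many
 * pages are pinned or dirty.
 */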
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

        rds_ib_flush_mr_pool(pool, 0, NULL);
}

void rds_ib_free_mr(void *trans_private, int invalidate)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_mr_pool *pool = ibmr->pool;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

        /* Return it to the pool's free list */
        if (rds_ibdev->use_fastreg)
                rds_ib_free_frmr_list(ibmr);
        else
                rds_ib_free_fmr_list(ibmr);

        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 5)
                queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_ib_flush_mr_pool(pool, 0, NULL);
                } else {
                        /* We get here if the user created a MR marked
                         * as use_once and invalidate at the same time.
                         */
                        queue_delayed_work(rds_ib_mr_wq,
                                           &pool->flush_worker, 10);
                }
        }

        rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
        struct rds_ib_device *rds_ibdev;

        down_read(&rds_ib_devices_lock);
        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                if (rds_ibdev->mr_8k_pool)
                        rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

                if (rds_ibdev->mr_1m_pool)
                        rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
        }
        up_read(&rds_ib_devices_lock);
}

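/* Entry point for MR registration on behalf of a socket.  Returns the
 * registered MR, or an ERR_PTR on failure.
 */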
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret,
                    struct rds_connection *conn)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
        struct rds_ib_connection *ic = NULL;
        int ret;

        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
        if (!rds_ibdev) {
                ret = -ENODEV;
                goto out;
        }

        if (conn)
                ic = conn->c_transport_data;

        if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
                ret = -ENODEV;
                goto out;
        }

        if (rds_ibdev->use_fastreg)
                ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
        else
                ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
        if (IS_ERR(ibmr)) {
                ret = PTR_ERR(ibmr);
                pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
        } else {
                return ibmr;
        }

out:
        if (rds_ibdev)
                rds_ib_dev_put(rds_ibdev);

        return ERR_PTR(ret);
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
        cancel_delayed_work_sync(&pool->flush_worker);
        rds_ib_flush_mr_pool(pool, 1, NULL);
        WARN_ON(atomic_read(&pool->item_count));
        WARN_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}

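/* Allocate and initialise one MR pool (8K or 1M message size class). */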
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
                                             int pool_type)
{
        struct rds_ib_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        pool->pool_type = pool_type;
        init_llist_head(&pool->free_list);
        init_llist_head(&pool->drop_list);
        init_llist_head(&pool->clean_list);
        spin_lock_init(&pool->clean_lock);
        mutex_init(&pool->flush_lock);
        init_waitqueue_head(&pool->flush_wait);
        INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

        if (pool_type == RDS_IB_MR_1M_POOL) {
                /* +1 allows for unaligned MRs */
                pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
                pool->max_items = rds_ibdev->max_1m_mrs;
        } else {
                /* pool_type == RDS_IB_MR_8K_POOL */
                pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
                pool->max_items = rds_ibdev->max_8k_mrs;
        }

        pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
        pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
        pool->use_fastreg = rds_ibdev->use_fastreg;

        return pool;
}

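/* Create the MR flush workqueue.  WQ_MEM_RECLAIM guarantees a rescuer
 * thread, so flushes can make progress even under memory pressure.
 */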
int rds_ib_mr_init(void)
{
        rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
        if (!rds_ib_mr_wq)
                return -ENOMEM;

        return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed.  As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
        destroy_workqueue(rds_ib_mr_wq);
}