/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");
struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void             *data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool		  going_down;
};
struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);
/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list. device_mutex protects writer access by device and client
 * registration / de-registration. lists_rwsem protects reader access to
 * these lists. Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock. A special case is when the
 * device_mutex is locked. In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);
static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};
static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
			dev_warn(&device->dev,
				 "Device is missing mandatory function %s\n",
				 mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}
static struct ib_device *__ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (device->index == index)
			return device;

	return NULL;
}
/*
 * Caller is responsible for releasing the reference by calling put_device().
 */
struct ib_device *ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	down_read(&lists_rwsem);
	device = __ib_device_get_by_index(index);
	if (device)
		get_device(&device->dev);

	up_read(&lists_rwsem);
	return device;
}
static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strcmp(name, dev_name(&device->dev)))
			return device;

	return NULL;
}
int ib_device_rename(struct ib_device *ibdev, const char *name)
{
	struct ib_device *device;
	int ret = 0;

	if (!strcmp(name, dev_name(&ibdev->dev)))
		return ret;

	mutex_lock(&device_mutex);
	list_for_each_entry(device, &device_list, core_list) {
		if (!strcmp(name, dev_name(&device->dev))) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = device_rename(&ibdev->dev, name);
	if (ret)
		goto out;
	strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
static int alloc_name(struct ib_device *ibdev, const char *name)
{
	unsigned long *inuse;
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		char buf[IB_DEVICE_NAME_MAX];

		if (sscanf(dev_name(&device->dev), name, &i) != 1)
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strcmp(buf, dev_name(&device->dev)))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);

	return dev_set_name(&ibdev->dev, name, i);
}
static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
	if (dev->reg_state == IB_DEV_UNREGISTERED) {
		/*
		 * In IB_DEV_UNINITIALIZED state, cache or port table
		 * is not even created. Free cache and port table only when
		 * device reaches UNREGISTERED state.
		 */
		ib_cache_release_one(dev);
		kfree(dev->port_immutable);
	}
	kfree(dev);
}
static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "NAME=%s", dev_name(device)))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}
static struct class ib_class = {
	.name        = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent  = ib_device_uevent,
};
/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	rdma_restrack_init(&device->res);

	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	dev_set_drvdata(&device->dev, device);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	rwlock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);
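/*
 * Example (illustrative sketch, not taken from an in-tree driver): a
 * low-level driver typically embeds struct ib_device at the start of its
 * own device structure and passes the combined size here. "my_dev" and
 * its extra fields are hypothetical:
 *
 *	struct my_dev {
 *		struct ib_device ibdev;
 *		spinlock_t       lock;
 *	};
 *
 *	struct my_dev *dev =
 *		(struct my_dev *)ib_alloc_device(sizeof(struct my_dev));
 *	if (!dev)
 *		return -ENOMEM;
 */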
/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(!list_empty(&device->client_data_list));
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	rdma_restrack_clean(&device->res);
	put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);
static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->client = client;
	context->data   = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	write_lock_irq(&device->client_data_lock);
	list_add(&context->list, &device->client_data_list);
	write_unlock_irq(&device->client_data_lock);
	up_write(&lists_rwsem);

	return 0;
}
static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
			    rdma_max_mad_size(dev, port) != 0);
}
static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/*
	 * device->port_immutable is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1-based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kcalloc(end_port + 1,
					 sizeof(*device->port_immutable),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}

	return 0;
}
void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
	if (dev->get_dev_fw_str)
		dev->get_dev_fw_str(dev, str);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);
static int setup_port_pkey_list(struct ib_device *device)
{
	int i;

	/*
	 * device->port_pkey_list is indexed directly by the port number;
	 * therefore it is declared as a 1-based array with potential empty
	 * slots at the beginning.
	 */
	device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
					 sizeof(*device->port_pkey_list),
					 GFP_KERNEL);

	if (!device->port_pkey_list)
		return -ENOMEM;

	for (i = 0; i < (rdma_end_port(device) + 1); i++) {
		spin_lock_init(&device->port_pkey_list[i].list_lock);
		INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
	}

	return 0;
}
static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		int i;

		for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) {
			u64 sp;
			int ret = ib_get_cached_subnet_prefix(dev,
							      i,
							      &sp);

			WARN_ONCE(ret,
				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
				  ret);
			if (!ret)
				ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&lists_rwsem);
}
static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);

	return NOTIFY_OK;
}
/**
 * __dev_new_index - allocate a device index
 *
 * Returns a suitable unique value for a new device interface
 * number.  It assumes that fewer than 2^32-1 IB devices will be
 * present in the system.
 */
static u32 __dev_new_index(void)
{
	/*
	 * The device index to allow stable naming.
	 * Similar to struct net -> ifindex.
	 */
	static u32 index;

	for (;;) {
		if (!(++index))
			index = 1;

		if (!__ib_device_get_by_index(index))
			return index;
	}
}
static void setup_dma_device(struct ib_device *device)
{
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask) {
			if (parent)
				device->dev.dma_mask = parent->dma_mask;
			else
				WARN_ON_ONCE(true);
		}
		if (!device->dev.coherent_dma_mask) {
			if (parent)
				device->dev.coherent_dma_mask =
					parent->coherent_dma_mask;
			else
				WARN_ON_ONCE(true);
		}
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		WARN_ON_ONCE(!parent);
		device->dma_device = parent;
	}
}
static void cleanup_device(struct ib_device *device)
{
	ib_cache_cleanup_one(device);
	ib_cache_release_one(device);
	kfree(device->port_pkey_list);
	kfree(device->port_immutable);
}
static int setup_device(struct ib_device *device)
{
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	int ret;

	ret = ib_device_check_mandatory(device);
	if (ret)
		return ret;

	ret = read_port_immutable(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't create per port immutable data\n");
		return ret;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->query_device(device, &device->attrs, &uhw);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't query the device attributes\n");
		goto port_cleanup;
	}

	ret = setup_port_pkey_list(device);
	if (ret) {
		dev_warn(&device->dev, "Couldn't create per port_pkey_list\n");
		goto port_cleanup;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't set up InfiniBand P_Key/GID cache\n");
		goto pkey_cleanup;
	}
	return 0;

pkey_cleanup:
	kfree(device->port_pkey_list);
port_cleanup:
	kfree(device->port_immutable);
	return ret;
}
/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device, const char *name,
		       int (*port_callback)(struct ib_device *, u8,
					    struct kobject *))
{
	int ret;
	struct ib_client *client;

	setup_dma_device(device);

	mutex_lock(&device_mutex);

	if (strchr(name, '%')) {
		ret = alloc_name(device, name);
		if (ret)
			goto out;
	} else {
		ret = dev_set_name(&device->dev, name);
		if (ret)
			goto out;
	}
	if (__ib_device_get_by_name(dev_name(&device->dev))) {
		ret = -ENFILE;
		goto out;
	}
	strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);

	ret = setup_device(device);
	if (ret)
		goto out;

	device->index = __dev_new_index();

	ret = ib_device_register_rdmacg(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't register device with rdma cgroup\n");
		goto dev_cleanup;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't register device with driver model\n");
		goto cg_cleanup;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (!add_client_context(device, client) && client->add)
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);
	mutex_unlock(&device_mutex);
	return 0;

cg_cleanup:
	ib_device_unregister_rdmacg(device);
dev_cleanup:
	cleanup_device(device);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
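/*
 * Example (illustrative sketch; names are hypothetical): after filling in
 * the mandatory methods on the embedded ib_device, a driver registers it
 * with a printf-style name template so the core picks a free index:
 *
 *	dev->ibdev.query_device = my_query_device;
 *	...				(remaining mandatory methods)
 *
 *	ret = ib_register_device(&dev->ibdev, "mydev%d", NULL);
 *	if (ret)
 *		ib_dealloc_device(&dev->ibdev);
 */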
/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	write_lock_irq(&device->client_data_lock);
	list_for_each_entry(context, &device->client_data_list, list)
		context->going_down = true;
	write_unlock_irq(&device->client_data_lock);
	downgrade_write(&lists_rwsem);

	list_for_each_entry(context, &device->client_data_list, list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	ib_device_unregister_sysfs(device);
	ib_device_unregister_rdmacg(device);

	mutex_unlock(&device_mutex);

	ib_cache_cleanup_one(device);

	ib_security_destroy_port_pkey_list(device);
	kfree(device->port_pkey_list);

	down_write(&lists_rwsem);
	write_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		list_del(&context->list);
		kfree(context);
	}
	write_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);
/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (!add_client_context(device, client) && client->add)
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);
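/*
 * Example (illustrative sketch; my_add_one/my_remove_one are hypothetical):
 * a minimal client that gets notified about every IB device in the system:
 *
 *	static void my_add_one(struct ib_device *device)
 *	{
 *		...
 *	}
 *
 *	static void my_remove_one(struct ib_device *device, void *client_data)
 *	{
 *		...
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add_one,
 *		.remove = my_remove_one,
 *	};
 *
 *	ret = ib_register_client(&my_client);
 */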
/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context;
	struct ib_device *device;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		write_lock_irq(&device->client_data_lock);
		list_for_each_entry(context, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		write_unlock_irq(&device->client_data_lock);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			dev_warn(&device->dev,
				 "No client context found for %s\n",
				 client->name);
			continue;
		}

		down_write(&lists_rwsem);
		write_lock_irq(&device->client_data_lock);
		list_del(&found_context->list);
		write_unlock_irq(&device->client_data_lock);
		up_write(&lists_rwsem);
		kfree(found_context);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);
/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	read_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	read_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);
/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	write_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	dev_warn(&device->dev, "No client context found for %s\n",
		 client->name);

out:
	write_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
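/*
 * Example (illustrative sketch, hypothetical names): a client usually
 * allocates its per-device state in its add callback, publishes it with
 * ib_set_client_data(), and looks it up later from other code paths:
 *
 *	static void my_add_one(struct ib_device *device)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return;
 *		ib_set_client_data(device, &my_client, st);
 *	}
 *
 *	...
 *	struct my_state *st = ib_get_client_data(device, &my_client);
 */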
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_register_event_handler);
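/*
 * Example (illustrative sketch): INIT_IB_EVENT_HANDLER() from
 * <rdma/ib_verbs.h> binds a handler function to a device before
 * registration; my_event_handler is hypothetical:
 *
 *	struct ib_event_handler handler;
 *
 *	INIT_IB_EVENT_HANDLER(&handler, device, my_event_handler);
 *	ib_register_event_handler(&handler);
 *
 * Because the callback may run in interrupt context, my_event_handler()
 * must not sleep.
 */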
/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_unregister_event_handler);
/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
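/*
 * Example (illustrative sketch): a low-level driver reporting that one of
 * its ports came up might dispatch the event like this:
 *
 *	struct ib_event event;
 *
 *	event.device           = &dev->ibdev;
 *	event.element.port_num = port;
 *	event.event            = IB_EVENT_PORT_ACTIVE;
 *	ib_dispatch_event(&event);
 */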
/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	union ib_gid gid;
	int err;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = device->query_gid(device, port_num, 0, &gid);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}
EXPORT_SYMBOL(ib_query_port);
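/*
 * Example (illustrative sketch): checking whether port 1 of a device is
 * active and which MTU enum it reports:
 *
 *	struct ib_port_attr attr;
 *	int err = ib_query_port(device, 1, &attr);
 *
 *	if (!err && attr.state == IB_PORT_ACTIVE)
 *		pr_info("port active, max MTU enum %d\n", attr.max_mtu);
 */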
/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev : IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * which are related to netdevice and calls callback() on each
 * device for which filter() function returns non-zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->get_netdev)
				idev = ib_dev->get_netdev(ib_dev, port);

			if (idev &&
			    idev->reg_state >= NETREG_UNREGISTERED) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}
/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all RoCE devices' physical ports which are related
 * to netdevices and calls callback() on each device for which
 * filter() function returns non-zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}
/**
 * ib_enum_all_devs - enumerate all ib_devices
 * @cb: Callback to call for each found ib_device
 *
 * Enumerates all ib_devices and calls callback() on each device.
 */
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb)
{
	struct ib_device *dev;
	unsigned int idx = 0;
	int ret = 0;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		ret = nldev_cb(dev, skb, cb, idx);
		if (ret)
			break;
		idx++;
	}

	up_read(&lists_rwsem);
	return ret;
}
/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);
/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);
/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	int rc;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	if (device->modify_port)
		rc = device->modify_port(device, port_num, port_modify_mask,
					 port_modify);
	else
		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;

	return rc;
}
EXPORT_SYMBOL(ib_modify_port);
/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs. It searches only the IB link layer.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (!rdma_protocol_ib(device, port))
			continue;

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = rdma_query_gid(device, port, i, &tmp_gid);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* If there is a full-member pkey, take it. */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* No full-member pkey found; take the limited-member one if it exists. */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
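/*
 * Note on the matching above (membership semantics from the IB spec): the
 * low 15 bits of a P_Key are the key itself, and bit 15 marks a full
 * member. For example, searching for 0xFFFF returns a table slot holding
 * the limited-member value 0x7FFF only when no full-member entry 0xFFFF
 * exists on the port.
 */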
/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:	An RDMA device on which the request has been received.
 * @port:	Port number on the RDMA device.
 * @pkey:	The Pkey the request came on.
 * @gid:	A GID that the net_dev uses to communicate.
 * @addr:	Contains the IP address that the request specified as its
 *		destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.doit = ib_nl_handle_resolve_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.doit = ib_nl_handle_set_timeout,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.doit = ib_nl_handle_ip_res_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};
static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ib_comp_unbound_wq =
		alloc_workqueue("ib-comp-unb-wq",
				WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
				WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
	if (!ib_comp_unbound_wq) {
		ret = -ENOMEM;
		goto err_comp;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp_unbound;
	}

	ret = rdma_nl_init();
	if (ret) {
		pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
		goto err_sysfs;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_ibnl;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ret = register_lsm_notifier(&ibdev_lsm_nb);
	if (ret) {
		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
		goto err_sa;
	}

	nldev_init();
	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
	roce_gid_mgmt_init();

	return 0;

err_sa:
	ib_sa_cleanup();
err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_ibnl:
	rdma_nl_exit();
err_sysfs:
	class_unregister(&ib_class);
err_comp_unbound:
	destroy_workqueue(ib_comp_unbound_wq);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}
static void __exit ib_core_cleanup(void)
{
	roce_gid_mgmt_cleanup();
	nldev_exit();
	rdma_nl_unregister(RDMA_NL_LS);
	unregister_lsm_notifier(&ibdev_lsm_nb);
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	rdma_nl_exit();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_unbound_wq);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}
MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

subsys_initcall(ib_core_init);
module_exit(ib_core_cleanup);