2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/module.h>
35 #include <linux/string.h>
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/slab.h>
39 #include <linux/init.h>
40 #include <linux/netdevice.h>
41 #include <net/net_namespace.h>
42 #include <net/netns/generic.h>
43 #include <linux/security.h>
44 #include <linux/notifier.h>
45 #include <linux/hashtable.h>
46 #include <rdma/rdma_netlink.h>
47 #include <rdma/ib_addr.h>
48 #include <rdma/ib_cache.h>
50 #include "core_priv.h"
53 MODULE_AUTHOR("Roland Dreier");
54 MODULE_DESCRIPTION("core kernel InfiniBand API");
55 MODULE_LICENSE("Dual BSD/GPL");
57 struct workqueue_struct *ib_comp_wq;
58 struct workqueue_struct *ib_comp_unbound_wq;
59 struct workqueue_struct *ib_wq;
60 EXPORT_SYMBOL_GPL(ib_wq);
63 * Each of the three rwsem locks (devices, clients, client_data) protects the
64 * xarray of the same name. Specifically it allows the caller to assert that
65 * the MARK will/will not be changing under the lock, and for devices and
66 * clients, that the value in the xarray is still a valid pointer. Change of
67 * the MARK is linked to the object state, so holding the lock and testing the
68 * MARK also asserts that the contained object is in a certain state.
 * This is used to build a two-stage register/unregister flow where objects
 * can remain in the xarray even though their registration or unregistration
 * is still in progress.
74 * The xarray itself provides additional locking, and restartable iteration,
75 * which is also relied on.
77 * Locks should not be nested, with the exception of client_data, which is
78 * allowed to nest under the read side of the other two locks.
 * The devices_rwsem also protects the device name list; any change or
 * assignment of a device name must also hold the write side to guarantee
 * unique names.
 * The devices xarray contains devices that have had their names assigned;
 * they may not yet be registered. Users that care about the registration
 * status need to call ib_device_try_get() on the device to ensure it is
 * registered, and stays registered, for the required duration. A usage
 * sketch follows the definitions below.
 */
92 static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
93 static DECLARE_RWSEM(devices_rwsem);
94 #define DEVICE_REGISTERED XA_MARK_1
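/*
 * Illustrative reader sketch for the scheme described above (not core code):
 * iterate only registered devices while holding the read side of
 * devices_rwsem, which guarantees the DEVICE_REGISTERED mark cannot change
 * under the loop.
 *
 *	struct ib_device *dev;
 *	unsigned long index;
 *
 *	down_read(&devices_rwsem);
 *	xa_for_each_marked(&devices, index, dev, DEVICE_REGISTERED) {
 *		// dev cannot lose DEVICE_REGISTERED while the lock is held
 *	}
 *	up_read(&devices_rwsem);
 */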
96 static LIST_HEAD(client_list);
97 #define CLIENT_REGISTERED XA_MARK_1
98 static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
99 static DECLARE_RWSEM(clients_rwsem);
 * If client_data is registered then the corresponding client must also still
 * be registered.
 */
105 #define CLIENT_DATA_REGISTERED XA_MARK_1
108 * struct rdma_dev_net - rdma net namespace metadata for a net
109 * @net: Pointer to owner net namespace
110 * @id: xarray id to identify the net namespace.
struct rdma_dev_net {
	possible_net_t net;
	u32 id;
};

static unsigned int rdma_dev_net_id;
120 * A list of net namespaces is maintained in an xarray. This is necessary
121 * because we can't get the locking right using the existing net ns list. We
 * would require an init_net callback after the list is updated.
124 static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
126 * rwsem to protect accessing the rdma_nets xarray entries.
128 static DECLARE_RWSEM(rdma_nets_rwsem);
130 bool ib_devices_shared_netns = true;
131 module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
132 MODULE_PARM_DESC(netns_mode,
133 "Share device among net namespaces; default=1 (shared)");
/**
 * rdma_dev_access_netns() - Return whether an rdma device can be accessed
 *			     from a specified net namespace or not.
 * @dev:	Pointer to the rdma device which needs to be checked
 * @net:	Pointer to the net namespace for which access is to be checked
 *
 * When the rdma device is in shared mode, it ignores the net namespace.
 * When the rdma device is exclusive to a net namespace, the device's net
 * namespace is checked against the specified one.
 */
147 bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
149 return (ib_devices_shared_netns ||
150 net_eq(read_pnet(&dev->coredev.rdma_net), net));
152 EXPORT_SYMBOL(rdma_dev_access_netns);
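/*
 * Typical caller sketch (illustrative only): a netlink handler checks that
 * the requesting socket's namespace may see the device before using it; the
 * error code choice is the caller's.
 *
 *	if (!rdma_dev_access_netns(device, sock_net(skb->sk)))
 *		return -ENODEV;
 */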
155 * xarray has this behavior where it won't iterate over NULL values stored in
156 * allocated arrays. So we need our own iterator to see all values stored in
157 * the array. This does the same thing as xa_for_each except that it also
158 * returns NULL valued entries if the array is allocating. Simplified to only
159 * work on simple xarrays.
161 static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
164 XA_STATE(xas, xa, *indexp);
169 entry = xas_find_marked(&xas, ULONG_MAX, filter);
170 if (xa_is_zero(entry))
172 } while (xas_retry(&xas, entry));
176 *indexp = xas.xa_index;
177 if (xa_is_zero(entry))
181 return XA_ERROR(-ENOENT);
183 #define xan_for_each_marked(xa, index, entry, filter) \
184 for (index = 0, entry = xan_find_marked(xa, &(index), filter); \
186 (index)++, entry = xan_find_marked(xa, &(index), filter))
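/*
 * Usage sketch for the helper above (illustrative): walk every
 * CLIENT_DATA_REGISTERED slot of a device, including slots whose stored
 * value is NULL, which plain xa_for_each_marked() would skip.
 *
 *	void *client_data;
 *	unsigned long index;
 *
 *	xan_for_each_marked(&device->client_data, index, client_data,
 *			    CLIENT_DATA_REGISTERED) {
 *		// client_data may legitimately be NULL here
 *	}
 */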
188 /* RCU hash table mapping netdevice pointers to struct ib_port_data */
189 static DEFINE_SPINLOCK(ndev_hash_lock);
190 static DECLARE_HASHTABLE(ndev_hash, 5);
192 static void free_netdevs(struct ib_device *ib_dev);
193 static void ib_unregister_work(struct work_struct *work);
194 static void __ib_unregister_device(struct ib_device *device);
195 static int ib_security_change(struct notifier_block *nb, unsigned long event,
197 static void ib_policy_change_task(struct work_struct *work);
198 static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
			   struct va_format *vaf)
{
	if (ibdev && ibdev->dev.parent)
		dev_printk_emit(level[1] - '0',
				ibdev->dev.parent,
				"%s %s %s: %pV",
				dev_driver_string(ibdev->dev.parent),
				dev_name(ibdev->dev.parent),
				dev_name(&ibdev->dev),
				vaf);
	else if (ibdev)
		printk("%s%s: %pV",
		       level, dev_name(&ibdev->dev), vaf);
	else
		printk("%s(NULL ib_device): %pV", level, vaf);
}
218 void ibdev_printk(const char *level, const struct ib_device *ibdev,
219 const char *format, ...)
221 struct va_format vaf;
224 va_start(args, format);
229 __ibdev_printk(level, ibdev, &vaf);
233 EXPORT_SYMBOL(ibdev_printk);
235 #define define_ibdev_printk_level(func, level) \
236 void func(const struct ib_device *ibdev, const char *fmt, ...) \
238 struct va_format vaf; \
241 va_start(args, fmt); \
246 __ibdev_printk(level, ibdev, &vaf); \
252 define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
253 define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
254 define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
255 define_ibdev_printk_level(ibdev_err, KERN_ERR);
256 define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
257 define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
258 define_ibdev_printk_level(ibdev_info, KERN_INFO);
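/*
 * Example use of the printk helpers generated above (illustrative): callers
 * get device-prefixed messages without open-coding dev_printk():
 *
 *	ibdev_warn(ibdev, "port %u: link state change\n", port);
 *	ibdev_info(ibdev, "device has %u physical ports\n",
 *		   ibdev->phys_port_cnt);
 */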
260 static struct notifier_block ibdev_lsm_nb = {
261 .notifier_call = ib_security_change,
264 static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
267 /* Pointer to the RCU head at the start of the ib_port_data array */
268 struct ib_port_data_rcu {
269 struct rcu_head rcu_head;
270 struct ib_port_data pdata[];
273 static int ib_device_check_mandatory(struct ib_device *device)
275 #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
276 static const struct {
279 } mandatory_table[] = {
280 IB_MANDATORY_FUNC(query_device),
281 IB_MANDATORY_FUNC(query_port),
282 IB_MANDATORY_FUNC(query_pkey),
283 IB_MANDATORY_FUNC(alloc_pd),
284 IB_MANDATORY_FUNC(dealloc_pd),
285 IB_MANDATORY_FUNC(create_qp),
286 IB_MANDATORY_FUNC(modify_qp),
287 IB_MANDATORY_FUNC(destroy_qp),
288 IB_MANDATORY_FUNC(post_send),
289 IB_MANDATORY_FUNC(post_recv),
290 IB_MANDATORY_FUNC(create_cq),
291 IB_MANDATORY_FUNC(destroy_cq),
292 IB_MANDATORY_FUNC(poll_cq),
293 IB_MANDATORY_FUNC(req_notify_cq),
294 IB_MANDATORY_FUNC(get_dma_mr),
295 IB_MANDATORY_FUNC(dereg_mr),
296 IB_MANDATORY_FUNC(get_port_immutable)
300 device->kverbs_provider = true;
301 for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
302 if (!*(void **) ((void *) &device->ops +
303 mandatory_table[i].offset)) {
304 device->kverbs_provider = false;
 * The caller must perform ib_device_put() to release the device reference
 * when ib_device_get_by_index() returns a valid device pointer.
 */
316 struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
318 struct ib_device *device;
320 down_read(&devices_rwsem);
321 device = xa_load(&devices, index);
323 if (!rdma_dev_access_netns(device, net)) {
328 if (!ib_device_try_get(device))
332 up_read(&devices_rwsem);
337 * ib_device_put - Release IB device reference
338 * @device: device whose reference to be released
 * ib_device_put() releases a reference to the IB device to allow it to be
 * unregistered and eventually freed.
 */
343 void ib_device_put(struct ib_device *device)
345 if (refcount_dec_and_test(&device->refcount))
346 complete(&device->unreg_completion);
348 EXPORT_SYMBOL(ib_device_put);
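/*
 * Reference pattern for the get/put pair above (illustrative): the device is
 * guaranteed to stay registered between the successful get and the put.
 *
 *	struct ib_device *dev = ib_device_get_by_index(net, index);
 *
 *	if (!dev)
 *		return -ENODEV;
 *	// ... use dev ...
 *	ib_device_put(dev);
 */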
350 static struct ib_device *__ib_device_get_by_name(const char *name)
352 struct ib_device *device;
355 xa_for_each (&devices, index, device)
356 if (!strcmp(name, dev_name(&device->dev)))
363 * ib_device_get_by_name - Find an IB device by name
364 * @name: The name to look for
365 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
367 * Find and hold an ib_device by its name. The caller must call
368 * ib_device_put() on the returned pointer.
370 struct ib_device *ib_device_get_by_name(const char *name,
371 enum rdma_driver_id driver_id)
373 struct ib_device *device;
375 down_read(&devices_rwsem);
376 device = __ib_device_get_by_name(name);
377 if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
378 device->driver_id != driver_id)
382 if (!ib_device_try_get(device))
385 up_read(&devices_rwsem);
388 EXPORT_SYMBOL(ib_device_get_by_name);
390 static int rename_compat_devs(struct ib_device *device)
392 struct ib_core_device *cdev;
396 mutex_lock(&device->compat_devs_mutex);
397 xa_for_each (&device->compat_devs, index, cdev) {
398 ret = device_rename(&cdev->dev, dev_name(&device->dev));
401 "Fail to rename compatdev to new name %s\n",
402 dev_name(&device->dev));
406 mutex_unlock(&device->compat_devs_mutex);
410 int ib_device_rename(struct ib_device *ibdev, const char *name)
414 down_write(&devices_rwsem);
415 if (!strcmp(name, dev_name(&ibdev->dev))) {
420 if (__ib_device_get_by_name(name)) {
425 ret = device_rename(&ibdev->dev, name);
428 strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
429 ret = rename_compat_devs(ibdev);
431 up_write(&devices_rwsem);
435 static int alloc_name(struct ib_device *ibdev, const char *name)
437 struct ib_device *device;
443 lockdep_assert_held_exclusive(&devices_rwsem);
445 xa_for_each (&devices, index, device) {
446 char buf[IB_DEVICE_NAME_MAX];
448 if (sscanf(dev_name(&device->dev), name, &i) != 1)
450 if (i < 0 || i >= INT_MAX)
452 snprintf(buf, sizeof buf, name, i);
453 if (strcmp(buf, dev_name(&device->dev)) != 0)
456 rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
461 rc = ida_alloc(&inuse, GFP_KERNEL);
465 rc = dev_set_name(&ibdev->dev, name, rc);
471 static void ib_device_release(struct device *device)
473 struct ib_device *dev = container_of(device, struct ib_device, dev);
476 WARN_ON(refcount_read(&dev->refcount));
477 ib_cache_release_one(dev);
478 ib_security_release_port_pkey_list(dev);
479 xa_destroy(&dev->compat_devs);
480 xa_destroy(&dev->client_data);
482 kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
485 kfree_rcu(dev, rcu_head);
488 static int ib_device_uevent(struct device *device,
489 struct kobj_uevent_env *env)
491 if (add_uevent_var(env, "NAME=%s", dev_name(device)))
495 * It would be nice to pass the node GUID with the event...
501 static const void *net_namespace(struct device *d)
503 struct ib_core_device *coredev =
504 container_of(d, struct ib_core_device, dev);
506 return read_pnet(&coredev->rdma_net);
509 static struct class ib_class = {
510 .name = "infiniband",
511 .dev_release = ib_device_release,
512 .dev_uevent = ib_device_uevent,
513 .ns_type = &net_ns_type_operations,
514 .namespace = net_namespace,
517 static void rdma_init_coredev(struct ib_core_device *coredev,
518 struct ib_device *dev, struct net *net)
	/* This BUILD_BUG_ON is intended to catch layout changes in the
	 * union of ib_core_device and device.
	 * dev must be the first element, as ib_core and provider
	 * drivers use it. Adding anything in ib_core_device before
	 * device will break this assumption.
	 */
526 BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
527 offsetof(struct ib_device, dev));
529 coredev->dev.class = &ib_class;
530 coredev->dev.groups = dev->groups;
531 device_initialize(&coredev->dev);
532 coredev->owner = dev;
533 INIT_LIST_HEAD(&coredev->port_list);
534 write_pnet(&coredev->rdma_net, net);
538 * _ib_alloc_device - allocate an IB device struct
539 * @size:size of structure to allocate
541 * Low-level drivers should use ib_alloc_device() to allocate &struct
542 * ib_device. @size is the size of the structure to be allocated,
543 * including any private data used by the low-level driver.
544 * ib_dealloc_device() must be used to free structures allocated with
547 struct ib_device *_ib_alloc_device(size_t size)
549 struct ib_device *device;
551 if (WARN_ON(size < sizeof(struct ib_device)))
554 device = kzalloc(size, GFP_KERNEL);
558 if (rdma_restrack_init(device)) {
563 device->groups[0] = &ib_dev_attr_group;
564 rdma_init_coredev(&device->coredev, device, &init_net);
566 INIT_LIST_HEAD(&device->event_handler_list);
567 spin_lock_init(&device->event_handler_lock);
568 mutex_init(&device->unregistration_lock);
	/*
	 * client_data needs to be an allocating xarray because we don't want
	 * our mark to be destroyed if the user stores NULL in the client data.
	 */
573 xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
574 init_rwsem(&device->client_data_rwsem);
575 xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
576 mutex_init(&device->compat_devs_mutex);
577 init_completion(&device->unreg_completion);
578 INIT_WORK(&device->unregistration_work, ib_unregister_work);
582 EXPORT_SYMBOL(_ib_alloc_device);
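/*
 * Driver-side sketch (illustrative; mydrv_dev and its ibdev member are
 * hypothetical) using the ib_alloc_device() wrapper from <rdma/ib_verbs.h>,
 * which calls _ib_alloc_device() with the full structure size:
 *
 *	struct mydrv_dev {
 *		struct ib_device ibdev;		// must be the first member
 *		// ... driver private state ...
 *	};
 *
 *	struct mydrv_dev *mdev = ib_alloc_device(mydrv_dev, ibdev);
 *
 *	if (!mdev)
 *		return -ENOMEM;
 */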
585 * ib_dealloc_device - free an IB device struct
586 * @device:structure to free
588 * Free a structure allocated with ib_alloc_device().
590 void ib_dealloc_device(struct ib_device *device)
592 if (device->ops.dealloc_driver)
593 device->ops.dealloc_driver(device);
596 * ib_unregister_driver() requires all devices to remain in the xarray
597 * while their ops are callable. The last op we call is dealloc_driver
598 * above. This is needed to create a fence on op callbacks prior to
599 * allowing the driver module to unload.
601 down_write(&devices_rwsem);
602 if (xa_load(&devices, device->index) == device)
603 xa_erase(&devices, device->index);
604 up_write(&devices_rwsem);
606 /* Expedite releasing netdev references */
607 free_netdevs(device);
609 WARN_ON(!xa_empty(&device->compat_devs));
610 WARN_ON(!xa_empty(&device->client_data));
611 WARN_ON(refcount_read(&device->refcount));
612 rdma_restrack_clean(device);
613 /* Balances with device_initialize */
614 put_device(&device->dev);
616 EXPORT_SYMBOL(ib_dealloc_device);
619 * add_client_context() and remove_client_context() must be safe against
620 * parallel calls on the same device - registration/unregistration of both the
621 * device and client can be occurring in parallel.
 * The routines need to be a fence: no caller may return until the add
 * or remove is fully completed.
 */
626 static int add_client_context(struct ib_device *device,
627 struct ib_client *client)
631 if (!device->kverbs_provider && !client->no_kverbs_req)
634 down_write(&device->client_data_rwsem);
636 * Another caller to add_client_context got here first and has already
637 * completely initialized context.
639 if (xa_get_mark(&device->client_data, client->client_id,
640 CLIENT_DATA_REGISTERED))
643 ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
647 downgrade_write(&device->client_data_rwsem);
651 /* Readers shall not see a client until add has been completed */
652 xa_set_mark(&device->client_data, client->client_id,
653 CLIENT_DATA_REGISTERED);
654 up_read(&device->client_data_rwsem);
658 up_write(&device->client_data_rwsem);
662 static void remove_client_context(struct ib_device *device,
663 unsigned int client_id)
665 struct ib_client *client;
668 down_write(&device->client_data_rwsem);
669 if (!xa_get_mark(&device->client_data, client_id,
670 CLIENT_DATA_REGISTERED)) {
671 up_write(&device->client_data_rwsem);
674 client_data = xa_load(&device->client_data, client_id);
675 xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
676 client = xa_load(&clients, client_id);
677 downgrade_write(&device->client_data_rwsem);
	/*
	 * Notice we cannot be holding any exclusive locks when calling the
	 * remove callback as the remove callback can recurse back into any
	 * public functions in this module and thus try for any locks those
	 * functions take.
	 *
	 * For this reason clients and drivers should not call the
	 * unregistration functions while holding any locks.
	 *
	 * It is tempting to drop the client_data_rwsem too, but this is
	 * required to ensure that unregister_client does not return until all
	 * clients are completely unregistered, which is required to avoid
	 * module unloading races.
	 */
	if (client->remove)
		client->remove(device, client_data);
696 xa_erase(&device->client_data, client_id);
697 up_read(&device->client_data_rwsem);
700 static int alloc_port_data(struct ib_device *device)
702 struct ib_port_data_rcu *pdata_rcu;
705 if (device->port_data)
708 /* This can only be called once the physical port range is defined */
709 if (WARN_ON(!device->phys_port_cnt))
713 * device->port_data is indexed directly by the port number to make
714 * access to this data as efficient as possible.
716 * Therefore port_data is declared as a 1 based array with potential
717 * empty slots at the beginning.
719 pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
720 rdma_end_port(device) + 1),
	/*
	 * The rcu_head is put in front of the port data array and the stored
	 * pointer is adjusted since we never need to see that member until
	 * the device is destroyed.
	 */
729 device->port_data = pdata_rcu->pdata;
731 rdma_for_each_port (device, port) {
732 struct ib_port_data *pdata = &device->port_data[port];
734 pdata->ib_dev = device;
735 spin_lock_init(&pdata->pkey_list_lock);
736 INIT_LIST_HEAD(&pdata->pkey_list);
737 spin_lock_init(&pdata->netdev_lock);
738 INIT_HLIST_NODE(&pdata->ndev_hash_link);
743 static int verify_immutable(const struct ib_device *dev, u8 port)
745 return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
746 rdma_max_mad_size(dev, port) != 0);
749 static int setup_port_data(struct ib_device *device)
754 ret = alloc_port_data(device);
758 rdma_for_each_port (device, port) {
759 struct ib_port_data *pdata = &device->port_data[port];
761 ret = device->ops.get_port_immutable(device, port,
766 if (verify_immutable(device, port))
772 void ib_get_device_fw_str(struct ib_device *dev, char *str)
774 if (dev->ops.get_dev_fw_str)
775 dev->ops.get_dev_fw_str(dev, str);
779 EXPORT_SYMBOL(ib_get_device_fw_str);
781 static void ib_policy_change_task(struct work_struct *work)
783 struct ib_device *dev;
786 down_read(&devices_rwsem);
787 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
790 rdma_for_each_port (dev, i) {
792 int ret = ib_get_cached_subnet_prefix(dev,
797 "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
800 ib_security_cache_change(dev, i, sp);
803 up_read(&devices_rwsem);
806 static int ib_security_change(struct notifier_block *nb, unsigned long event,
809 if (event != LSM_POLICY_CHANGE)
812 schedule_work(&ib_policy_change_work);
813 ib_mad_agent_security_change();
818 static void compatdev_release(struct device *dev)
820 struct ib_core_device *cdev =
821 container_of(dev, struct ib_core_device, dev);
826 static int add_one_compat_dev(struct ib_device *device,
827 struct rdma_dev_net *rnet)
829 struct ib_core_device *cdev;
832 lockdep_assert_held(&rdma_nets_rwsem);
833 if (!ib_devices_shared_netns)
837 * Create and add compat device in all namespaces other than where it
838 * is currently bound to.
840 if (net_eq(read_pnet(&rnet->net),
841 read_pnet(&device->coredev.rdma_net)))
845 * The first of init_net() or ib_register_device() to take the
846 * compat_devs_mutex wins and gets to add the device. Others will wait
847 * for completion here.
849 mutex_lock(&device->compat_devs_mutex);
850 cdev = xa_load(&device->compat_devs, rnet->id);
855 ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
859 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
865 cdev->dev.parent = device->dev.parent;
866 rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
867 cdev->dev.release = compatdev_release;
868 dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
870 ret = device_add(&cdev->dev);
873 ret = ib_setup_port_attrs(cdev);
877 ret = xa_err(xa_store(&device->compat_devs, rnet->id,
882 mutex_unlock(&device->compat_devs_mutex);
886 ib_free_port_attrs(cdev);
888 device_del(&cdev->dev);
890 put_device(&cdev->dev);
892 xa_release(&device->compat_devs, rnet->id);
894 mutex_unlock(&device->compat_devs_mutex);
898 static void remove_one_compat_dev(struct ib_device *device, u32 id)
900 struct ib_core_device *cdev;
902 mutex_lock(&device->compat_devs_mutex);
903 cdev = xa_erase(&device->compat_devs, id);
904 mutex_unlock(&device->compat_devs_mutex);
906 ib_free_port_attrs(cdev);
907 device_del(&cdev->dev);
908 put_device(&cdev->dev);
912 static void remove_compat_devs(struct ib_device *device)
914 struct ib_core_device *cdev;
917 xa_for_each (&device->compat_devs, index, cdev)
918 remove_one_compat_dev(device, index);
921 static int add_compat_devs(struct ib_device *device)
923 struct rdma_dev_net *rnet;
927 lockdep_assert_held(&devices_rwsem);
929 down_read(&rdma_nets_rwsem);
930 xa_for_each (&rdma_nets, index, rnet) {
931 ret = add_one_compat_dev(device, rnet);
935 up_read(&rdma_nets_rwsem);
939 static void remove_all_compat_devs(void)
941 struct ib_compat_device *cdev;
942 struct ib_device *dev;
945 down_read(&devices_rwsem);
946 xa_for_each (&devices, index, dev) {
947 unsigned long c_index = 0;
949 /* Hold nets_rwsem so that any other thread modifying this
950 * system param can sync with this thread.
952 down_read(&rdma_nets_rwsem);
953 xa_for_each (&dev->compat_devs, c_index, cdev)
954 remove_one_compat_dev(dev, c_index);
955 up_read(&rdma_nets_rwsem);
957 up_read(&devices_rwsem);
960 static int add_all_compat_devs(void)
962 struct rdma_dev_net *rnet;
963 struct ib_device *dev;
967 down_read(&devices_rwsem);
968 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
969 unsigned long net_index = 0;
971 /* Hold nets_rwsem so that any other thread modifying this
972 * system param can sync with this thread.
974 down_read(&rdma_nets_rwsem);
975 xa_for_each (&rdma_nets, net_index, rnet) {
976 ret = add_one_compat_dev(dev, rnet);
980 up_read(&rdma_nets_rwsem);
982 up_read(&devices_rwsem);
984 remove_all_compat_devs();
988 int rdma_compatdev_set(u8 enable)
990 struct rdma_dev_net *rnet;
994 down_write(&rdma_nets_rwsem);
995 if (ib_devices_shared_netns == enable) {
996 up_write(&rdma_nets_rwsem);
	/* enable/disable of compat devices is not supported
	 * when any net namespace other than the default init_net exists.
	 */
1003 xa_for_each (&rdma_nets, index, rnet) {
1008 ib_devices_shared_netns = enable;
1009 up_write(&rdma_nets_rwsem);
1014 ret = add_all_compat_devs();
1016 remove_all_compat_devs();
1020 static void rdma_dev_exit_net(struct net *net)
1022 struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
1023 struct ib_device *dev;
1024 unsigned long index;
1027 down_write(&rdma_nets_rwsem);
1029 * Prevent the ID from being re-used and hide the id from xa_for_each.
1031 ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
1033 up_write(&rdma_nets_rwsem);
1035 down_read(&devices_rwsem);
1036 xa_for_each (&devices, index, dev) {
1037 get_device(&dev->dev);
		/*
		 * Release the devices_rwsem so that the potentially blocking
		 * device_del() doesn't hold the devices_rwsem for too long.
		 */
1042 up_read(&devices_rwsem);
1044 remove_one_compat_dev(dev, rnet->id);
1047 * If the real device is in the NS then move it back to init.
1049 rdma_dev_change_netns(dev, net, &init_net);
1051 put_device(&dev->dev);
1052 down_read(&devices_rwsem);
1054 up_read(&devices_rwsem);
1056 xa_erase(&rdma_nets, rnet->id);
1059 static __net_init int rdma_dev_init_net(struct net *net)
1061 struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
1062 unsigned long index;
1063 struct ib_device *dev;
1066 /* No need to create any compat devices in default init_net. */
1067 if (net_eq(net, &init_net))
1070 write_pnet(&rnet->net, net);
1072 ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
1076 down_read(&devices_rwsem);
1077 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
1078 /* Hold nets_rwsem so that netlink command cannot change
1079 * system configuration for device sharing mode.
1081 down_read(&rdma_nets_rwsem);
1082 ret = add_one_compat_dev(dev, rnet);
1083 up_read(&rdma_nets_rwsem);
1087 up_read(&devices_rwsem);
1090 rdma_dev_exit_net(net);
1096 * Assign the unique string device name and the unique device index. This is
1097 * undone by ib_dealloc_device.
1099 static int assign_name(struct ib_device *device, const char *name)
1104 down_write(&devices_rwsem);
1105 /* Assign a unique name to the device */
1106 if (strchr(name, '%'))
1107 ret = alloc_name(device, name);
1109 ret = dev_set_name(&device->dev, name);
1113 if (__ib_device_get_by_name(dev_name(&device->dev))) {
1117 strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
1119 ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
1120 &last_id, GFP_KERNEL);
1125 up_write(&devices_rwsem);
1129 static void setup_dma_device(struct ib_device *device)
1131 struct device *parent = device->dev.parent;
1133 WARN_ON_ONCE(device->dma_device);
1134 if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into the new device.
		 */
1140 device->dma_device = &device->dev;
1141 if (!device->dev.dma_mask) {
1143 device->dev.dma_mask = parent->dma_mask;
1147 if (!device->dev.coherent_dma_mask) {
1149 device->dev.coherent_dma_mask =
1150 parent->coherent_dma_mask;
1156 * The caller did not provide custom DMA operations. Use the
1157 * DMA mapping operations of the parent device.
1159 WARN_ON_ONCE(!parent);
1160 device->dma_device = parent;
1162 /* Setup default max segment size for all IB devices */
1163 dma_set_max_seg_size(device->dma_device, SZ_2G);
1168 * setup_device() allocates memory and sets up data that requires calling the
1169 * device ops, this is the only reason these actions are not done during
1170 * ib_alloc_device. It is undone by ib_dealloc_device().
1172 static int setup_device(struct ib_device *device)
1174 struct ib_udata uhw = {.outlen = 0, .inlen = 0};
1177 setup_dma_device(device);
1179 ret = ib_device_check_mandatory(device);
1183 ret = setup_port_data(device);
1185 dev_warn(&device->dev, "Couldn't create per-port data\n");
1189 memset(&device->attrs, 0, sizeof(device->attrs));
1190 ret = device->ops.query_device(device, &device->attrs, &uhw);
1192 dev_warn(&device->dev,
1193 "Couldn't query the device attributes\n");
1200 static void disable_device(struct ib_device *device)
1202 struct ib_client *client;
1204 WARN_ON(!refcount_read(&device->refcount));
1206 down_write(&devices_rwsem);
1207 xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
1208 up_write(&devices_rwsem);
1210 down_read(&clients_rwsem);
1211 list_for_each_entry_reverse(client, &client_list, list)
1212 remove_client_context(device, client->client_id);
1213 up_read(&clients_rwsem);
1215 /* Pairs with refcount_set in enable_device */
1216 ib_device_put(device);
1217 wait_for_completion(&device->unreg_completion);
1220 * compat devices must be removed after device refcount drops to zero.
1221 * Otherwise init_net() may add more compatdevs after removing compat
1222 * devices and before device is disabled.
1224 remove_compat_devs(device);
/*
 * An enabled device is visible to all clients and to all the public facing
 * APIs that return a device pointer. This always returns with a new get, even
 * if it returns an error.
 */
1232 static int enable_device_and_get(struct ib_device *device)
1234 struct ib_client *client;
1235 unsigned long index;
1239 * One ref belongs to the xa and the other belongs to this
1240 * thread. This is needed to guard against parallel unregistration.
1242 refcount_set(&device->refcount, 2);
1243 down_write(&devices_rwsem);
1244 xa_set_mark(&devices, device->index, DEVICE_REGISTERED);
1247 * By using downgrade_write() we ensure that no other thread can clear
1248 * DEVICE_REGISTERED while we are completing the client setup.
1250 downgrade_write(&devices_rwsem);
1252 if (device->ops.enable_driver) {
1253 ret = device->ops.enable_driver(device);
1258 down_read(&clients_rwsem);
1259 xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
1260 ret = add_client_context(device, client);
1264 up_read(&clients_rwsem);
1266 ret = add_compat_devs(device);
1268 up_read(&devices_rwsem);
1273 * ib_register_device - Register an IB device with IB core
1274 * @device:Device to register
1276 * Low-level drivers use ib_register_device() to register their
1277 * devices with the IB core. All registered clients will receive a
1278 * callback for each device that is added. @device must be allocated
1279 * with ib_alloc_device().
 * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
 * asynchronously then the device pointer may become freed as soon as this
 * function returns.
 */
1285 int ib_register_device(struct ib_device *device, const char *name)
1289 ret = assign_name(device, name);
1293 ret = setup_device(device);
1297 ret = ib_cache_setup_one(device);
1299 dev_warn(&device->dev,
1300 "Couldn't set up InfiniBand P_Key/GID cache\n");
1304 ib_device_register_rdmacg(device);
1306 ret = device_add(&device->dev);
1310 ret = ib_device_register_sysfs(device);
1312 dev_warn(&device->dev,
1313 "Couldn't register device with driver model\n");
1317 ret = enable_device_and_get(device);
1319 void (*dealloc_fn)(struct ib_device *);
1322 * If we hit this error flow then we don't want to
1323 * automatically dealloc the device since the caller is
1324 * expected to call ib_dealloc_device() after
1325 * ib_register_device() fails. This is tricky due to the
1326 * possibility for a parallel unregistration along with this
1327 * error flow. Since we have a refcount here we know any
1328 * parallel flow is stopped in disable_device and will see the
1329 * NULL pointers, causing the responsibility to
1330 * ib_dealloc_device() to revert back to this thread.
1332 dealloc_fn = device->ops.dealloc_driver;
1333 device->ops.dealloc_driver = NULL;
1334 ib_device_put(device);
1335 __ib_unregister_device(device);
1336 device->ops.dealloc_driver = dealloc_fn;
1339 ib_device_put(device);
1344 device_del(&device->dev);
1346 ib_device_unregister_rdmacg(device);
1347 ib_cache_cleanup_one(device);
1350 EXPORT_SYMBOL(ib_register_device);
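/*
 * Typical driver registration sketch (illustrative; the mydrv names are
 * hypothetical): on failure the driver still owns the memory and must call
 * ib_dealloc_device() itself, as the error-flow comment above explains.
 *
 *	ib_set_device_ops(&mdev->ibdev, &mydrv_dev_ops);
 *	ret = ib_register_device(&mdev->ibdev, "mydrv%d");
 *	if (ret) {
 *		ib_dealloc_device(&mdev->ibdev);
 *		return ret;
 *	}
 */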
1352 /* Callers must hold a get on the device. */
1353 static void __ib_unregister_device(struct ib_device *ib_dev)
{
	/*
	 * We have a registration lock so that all the calls to unregister are
	 * fully fenced; once any unregister returns, the device is truly
	 * unregistered even if multiple callers are unregistering it at the
	 * same time. This also interacts with the registration flow and
	 * provides sane semantics if register and unregister are racing.
	 */
1362 mutex_lock(&ib_dev->unregistration_lock);
1363 if (!refcount_read(&ib_dev->refcount))
1366 disable_device(ib_dev);
1368 /* Expedite removing unregistered pointers from the hash table */
1369 free_netdevs(ib_dev);
1371 ib_device_unregister_sysfs(ib_dev);
1372 device_del(&ib_dev->dev);
1373 ib_device_unregister_rdmacg(ib_dev);
1374 ib_cache_cleanup_one(ib_dev);
1377 * Drivers using the new flow may not call ib_dealloc_device except
1378 * in error unwind prior to registration success.
1380 if (ib_dev->ops.dealloc_driver) {
1381 WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
1382 ib_dealloc_device(ib_dev);
1385 mutex_unlock(&ib_dev->unregistration_lock);
1389 * ib_unregister_device - Unregister an IB device
1390 * @device: The device to unregister
1392 * Unregister an IB device. All clients will receive a remove callback.
 * Callers should call this routine only once, and protect against races with
 * registration. Typically it should only be called as part of a remove
 * callback in an implementation of driver core's struct device_driver and
 * related methods.
 *
 * If ops.dealloc_driver is used then ib_dev will be freed upon return from
 * this function.
 */
1402 void ib_unregister_device(struct ib_device *ib_dev)
1404 get_device(&ib_dev->dev);
1405 __ib_unregister_device(ib_dev);
1406 put_device(&ib_dev->dev);
1408 EXPORT_SYMBOL(ib_unregister_device);
1411 * ib_unregister_device_and_put - Unregister a device while holding a 'get'
 * @ib_dev: The device to unregister
1414 * This is the same as ib_unregister_device(), except it includes an internal
1415 * ib_device_put() that should match a 'get' obtained by the caller.
1417 * It is safe to call this routine concurrently from multiple threads while
 * holding the 'get'. When the function returns the device is fully
 * unregistered.
 *
1421 * Drivers using this flow MUST use the driver_unregister callback to clean up
1422 * their resources associated with the device and dealloc it.
1424 void ib_unregister_device_and_put(struct ib_device *ib_dev)
1426 WARN_ON(!ib_dev->ops.dealloc_driver);
1427 get_device(&ib_dev->dev);
1428 ib_device_put(ib_dev);
1429 __ib_unregister_device(ib_dev);
1430 put_device(&ib_dev->dev);
1432 EXPORT_SYMBOL(ib_unregister_device_and_put);
1435 * ib_unregister_driver - Unregister all IB devices for a driver
1436 * @driver_id: The driver to unregister
1438 * This implements a fence for device unregistration. It only returns once all
1439 * devices associated with the driver_id have fully completed their
1440 * unregistration and returned from ib_unregister_device*().
 * If devices are not yet unregistered it goes ahead and starts unregistering
 * them.
 *
 * This does not block creation of new devices with the given driver_id; that
 * is the responsibility of the caller.
 */
1448 void ib_unregister_driver(enum rdma_driver_id driver_id)
1450 struct ib_device *ib_dev;
1451 unsigned long index;
1453 down_read(&devices_rwsem);
1454 xa_for_each (&devices, index, ib_dev) {
1455 if (ib_dev->driver_id != driver_id)
1458 get_device(&ib_dev->dev);
1459 up_read(&devices_rwsem);
1461 WARN_ON(!ib_dev->ops.dealloc_driver);
1462 __ib_unregister_device(ib_dev);
1464 put_device(&ib_dev->dev);
1465 down_read(&devices_rwsem);
1467 up_read(&devices_rwsem);
1469 EXPORT_SYMBOL(ib_unregister_driver);
1471 static void ib_unregister_work(struct work_struct *work)
1473 struct ib_device *ib_dev =
1474 container_of(work, struct ib_device, unregistration_work);
1476 __ib_unregister_device(ib_dev);
1477 put_device(&ib_dev->dev);
1481 * ib_unregister_device_queued - Unregister a device using a work queue
 * @ib_dev: The device to unregister
1484 * This schedules an asynchronous unregistration using a WQ for the device. A
1485 * driver should use this to avoid holding locks while doing unregistration,
1486 * such as holding the RTNL lock.
1488 * Drivers using this API must use ib_unregister_driver before module unload
1489 * to ensure that all scheduled unregistrations have completed.
1491 void ib_unregister_device_queued(struct ib_device *ib_dev)
1493 WARN_ON(!refcount_read(&ib_dev->refcount));
1494 WARN_ON(!ib_dev->ops.dealloc_driver);
1495 get_device(&ib_dev->dev);
1496 if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work))
1497 put_device(&ib_dev->dev);
1499 EXPORT_SYMBOL(ib_unregister_device_queued);
1502 * The caller must pass in a device that has the kref held and the refcount
 * released. If the device is in cur_net and still registered then it is moved
 * into net.
 */
1506 static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
1512 mutex_lock(&device->unregistration_lock);
	/*
	 * If a device is not under ib_device_get() or if the
	 * unregistration_lock is not held, the namespace can be changed, or
	 * it can be unregistered. Check again under the lock.
	 */
1519 if (refcount_read(&device->refcount) == 0 ||
1520 !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) {
1525 kobject_uevent(&device->dev.kobj, KOBJ_REMOVE);
1526 disable_device(device);
1529 * At this point no one can be using the device, so it is safe to
1530 * change the namespace.
1532 write_pnet(&device->coredev.rdma_net, net);
1534 down_read(&devices_rwsem);
1536 * Currently rdma devices are system wide unique. So the device name
1537 * is guaranteed free in the new namespace. Publish the new namespace
1538 * at the sysfs level.
1540 ret = device_rename(&device->dev, dev_name(&device->dev));
1541 up_read(&devices_rwsem);
1543 dev_warn(&device->dev,
1544 "%s: Couldn't rename device after namespace change\n",
1546 /* Try and put things back and re-enable the device */
1547 write_pnet(&device->coredev.rdma_net, cur_net);
1550 ret2 = enable_device_and_get(device);
1553 * This shouldn't really happen, but if it does, let the user
1554 * retry at later point. So don't disable the device.
1556 dev_warn(&device->dev,
1557 "%s: Couldn't re-enable device after namespace change\n",
1560 kobject_uevent(&device->dev.kobj, KOBJ_ADD);
1562 ib_device_put(device);
1564 mutex_unlock(&device->unregistration_lock);
1570 int ib_device_set_netns_put(struct sk_buff *skb,
1571 struct ib_device *dev, u32 ns_fd)
1576 net = get_net_ns_by_fd(ns_fd);
1582 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
	/*
	 * Currently supported only for those providers which support
	 * disassociation and don't do port-specific sysfs init. Once a
	 * port_cleanup infrastructure is implemented, this limitation will be
	 * removed.
	 */
1593 if (!dev->ops.disassociate_ucontext || dev->ops.init_port ||
1594 ib_devices_shared_netns) {
1599 get_device(&dev->dev);
1601 ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net);
1602 put_device(&dev->dev);
1614 static struct pernet_operations rdma_dev_net_ops = {
1615 .init = rdma_dev_init_net,
1616 .exit = rdma_dev_exit_net,
1617 .id = &rdma_dev_net_id,
1618 .size = sizeof(struct rdma_dev_net),
1621 static int assign_client_id(struct ib_client *client)
1625 down_write(&clients_rwsem);
1627 * The add/remove callbacks must be called in FIFO/LIFO order. To
1628 * achieve this we assign client_ids so they are sorted in
1629 * registration order, and retain a linked list we can reverse iterate
1630 * to get the LIFO order. The extra linked list can go away if xarray
1631 * learns to reverse iterate.
1633 if (list_empty(&client_list)) {
1634 client->client_id = 0;
1636 struct ib_client *last;
1638 last = list_last_entry(&client_list, struct ib_client, list);
1639 client->client_id = last->client_id + 1;
1641 ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
1645 xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
1646 list_add_tail(&client->list, &client_list);
1649 up_write(&clients_rwsem);
1654 * ib_register_client - Register an IB client
1655 * @client:Client to register
1657 * Upper level users of the IB drivers can use ib_register_client() to
1658 * register callbacks for IB device addition and removal. When an IB
1659 * device is added, each registered client's add method will be called
1660 * (in the order the clients were registered), and when a device is
1661 * removed, each client's remove method will be called (in the reverse
1662 * order that clients were registered). In addition, when
1663 * ib_register_client() is called, the client will receive an add
1664 * callback for all devices already registered.
1666 int ib_register_client(struct ib_client *client)
1668 struct ib_device *device;
1669 unsigned long index;
1672 ret = assign_client_id(client);
1676 down_read(&devices_rwsem);
1677 xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
1678 ret = add_client_context(device, client);
1680 up_read(&devices_rwsem);
1681 ib_unregister_client(client);
1685 up_read(&devices_rwsem);
1688 EXPORT_SYMBOL(ib_register_client);
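/*
 * Minimal client sketch (illustrative; the example_* names are hypothetical):
 * the add callback also runs for devices that were already registered before
 * ib_register_client() was called.
 *
 *	static void example_add(struct ib_device *device)
 *	{
 *		pr_info("example: device %s added\n", dev_name(&device->dev));
 *	}
 *
 *	static void example_remove(struct ib_device *device, void *client_data)
 *	{
 *	}
 *
 *	static struct ib_client example_client = {
 *		.name	= "example",
 *		.add	= example_add,
 *		.remove	= example_remove,
 *	};
 *
 *	ret = ib_register_client(&example_client);
 */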
1691 * ib_unregister_client - Unregister an IB client
1692 * @client:Client to unregister
1694 * Upper level users use ib_unregister_client() to remove their client
1695 * registration. When ib_unregister_client() is called, the client
1696 * will receive a remove callback for each IB device still registered.
 * This is a full fence; once it returns, no client callbacks will be called
 * or still be running in another thread.
 */
1701 void ib_unregister_client(struct ib_client *client)
1703 struct ib_device *device;
1704 unsigned long index;
1706 down_write(&clients_rwsem);
1707 xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
1708 up_write(&clients_rwsem);
1710 * Every device still known must be serialized to make sure we are
1711 * done with the client callbacks before we return.
1713 down_read(&devices_rwsem);
1714 xa_for_each (&devices, index, device)
1715 remove_client_context(device, client->client_id);
1716 up_read(&devices_rwsem);
1718 down_write(&clients_rwsem);
1719 list_del(&client->list);
1720 xa_erase(&clients, client->client_id);
1721 up_write(&clients_rwsem);
1723 EXPORT_SYMBOL(ib_unregister_client);
1726 * ib_set_client_data - Set IB client context
1727 * @device:Device to set context for
1728 * @client:Client to set context for
1729 * @data:Context to set
 * ib_set_client_data() sets client context data that can be retrieved with
 * ib_get_client_data(). This can only be called while the client is
 * registered to the device; once the ib_client remove() callback returns,
 * this function can no longer be called.
 */
1736 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
1741 if (WARN_ON(IS_ERR(data)))
1744 rc = xa_store(&device->client_data, client->client_id, data,
1746 WARN_ON(xa_is_err(rc));
1748 EXPORT_SYMBOL(ib_set_client_data);
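/*
 * Pairing sketch (illustrative; example_state and example_client are
 * hypothetical): a client stores per-device state from its add callback and
 * reads it back later with ib_get_client_data().
 *
 *	struct example_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *	if (st)
 *		ib_set_client_data(device, &example_client, st);
 *
 *	// later, e.g. in another client callback:
 *	struct example_state *st = ib_get_client_data(device, &example_client);
 */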
1751 * ib_register_event_handler - Register an IB event handler
1752 * @event_handler:Handler to register
1754 * ib_register_event_handler() registers an event handler that will be
1755 * called back when asynchronous IB events occur (as defined in
1756 * chapter 11 of the InfiniBand Architecture Specification). This
1757 * callback may occur in interrupt context.
1759 void ib_register_event_handler(struct ib_event_handler *event_handler)
1761 unsigned long flags;
1763 spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
1764 list_add_tail(&event_handler->list,
1765 &event_handler->device->event_handler_list);
1766 spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
1768 EXPORT_SYMBOL(ib_register_event_handler);
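/*
 * Usage sketch (illustrative; priv and example_event are hypothetical),
 * using the INIT_IB_EVENT_HANDLER() helper from <rdma/ib_verbs.h>:
 *
 *	static void example_event(struct ib_event_handler *handler,
 *				  struct ib_event *event)
 *	{
 *		// may run in interrupt context, so keep it short
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&priv->event_handler, device, example_event);
 *	ib_register_event_handler(&priv->event_handler);
 */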
1771 * ib_unregister_event_handler - Unregister an event handler
1772 * @event_handler:Handler to unregister
1774 * Unregister an event handler registered with
1775 * ib_register_event_handler().
1777 void ib_unregister_event_handler(struct ib_event_handler *event_handler)
1779 unsigned long flags;
1781 spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
1782 list_del(&event_handler->list);
1783 spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
1785 EXPORT_SYMBOL(ib_unregister_event_handler);
1788 * ib_dispatch_event - Dispatch an asynchronous event
1789 * @event:Event to dispatch
1791 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
1795 void ib_dispatch_event(struct ib_event *event)
1797 unsigned long flags;
1798 struct ib_event_handler *handler;
1800 spin_lock_irqsave(&event->device->event_handler_lock, flags);
1802 list_for_each_entry(handler, &event->device->event_handler_list, list)
1803 handler->handler(handler, event);
1805 spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
1807 EXPORT_SYMBOL(ib_dispatch_event);
1810 * ib_query_port - Query IB port attributes
1811 * @device:Device to query
1812 * @port_num:Port number to query
1813 * @port_attr:Port attributes
1815 * ib_query_port() returns the attributes of a port through the
1816 * @port_attr pointer.
1818 int ib_query_port(struct ib_device *device,
1820 struct ib_port_attr *port_attr)
1825 if (!rdma_is_port_valid(device, port_num))
1828 memset(port_attr, 0, sizeof(*port_attr));
1829 err = device->ops.query_port(device, port_num, port_attr);
1830 if (err || port_attr->subnet_prefix)
1833 if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
1836 err = device->ops.query_gid(device, port_num, 0, &gid);
1840 port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
1843 EXPORT_SYMBOL(ib_query_port);
1845 static void add_ndev_hash(struct ib_port_data *pdata)
1847 unsigned long flags;
1851 spin_lock_irqsave(&ndev_hash_lock, flags);
1852 if (hash_hashed(&pdata->ndev_hash_link)) {
1853 hash_del_rcu(&pdata->ndev_hash_link);
1854 spin_unlock_irqrestore(&ndev_hash_lock, flags);
		/*
		 * We cannot do hash_add_rcu after a hash_del_rcu until the
		 * RCU grace period has elapsed.
		 */
1860 spin_lock_irqsave(&ndev_hash_lock, flags);
1863 hash_add_rcu(ndev_hash, &pdata->ndev_hash_link,
1864 (uintptr_t)pdata->netdev);
1865 spin_unlock_irqrestore(&ndev_hash_lock, flags);
1869 * ib_device_set_netdev - Associate the ib_dev with an underlying net_device
1870 * @ib_dev: Device to modify
1871 * @ndev: net_device to affiliate, may be NULL
1872 * @port: IB port the net_device is connected to
1874 * Drivers should use this to link the ib_device to a netdev so the netdev
1875 * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
1876 * affiliated with any port.
1878 * The caller must ensure that the given ndev is not unregistered or
1879 * unregistering, and that either the ib_device is unregistered or
1880 * ib_device_set_netdev() is called with NULL when the ndev sends a
1881 * NETDEV_UNREGISTER event.
1883 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
1886 struct net_device *old_ndev;
1887 struct ib_port_data *pdata;
1888 unsigned long flags;
1892 * Drivers wish to call this before ib_register_driver, so we have to
1893 * setup the port data early.
1895 ret = alloc_port_data(ib_dev);
1899 if (!rdma_is_port_valid(ib_dev, port))
1902 pdata = &ib_dev->port_data[port];
1903 spin_lock_irqsave(&pdata->netdev_lock, flags);
1904 old_ndev = rcu_dereference_protected(
1905 pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
1906 if (old_ndev == ndev) {
1907 spin_unlock_irqrestore(&pdata->netdev_lock, flags);
1913 rcu_assign_pointer(pdata->netdev, ndev);
1914 spin_unlock_irqrestore(&pdata->netdev_lock, flags);
1916 add_ndev_hash(pdata);
1922 EXPORT_SYMBOL(ib_device_set_netdev);
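/*
 * Driver sketch (illustrative; mdev is hypothetical): affiliate the netdev
 * with port 1 when it appears and clear the association again on
 * NETDEV_UNREGISTER, as required by the comment above.
 *
 *	ret = ib_device_set_netdev(&mdev->ibdev, netdev, 1);
 *	...
 *	ib_device_set_netdev(&mdev->ibdev, NULL, 1);
 */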
1924 static void free_netdevs(struct ib_device *ib_dev)
1926 unsigned long flags;
1929 rdma_for_each_port (ib_dev, port) {
1930 struct ib_port_data *pdata = &ib_dev->port_data[port];
1931 struct net_device *ndev;
1933 spin_lock_irqsave(&pdata->netdev_lock, flags);
1934 ndev = rcu_dereference_protected(
1935 pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
1937 spin_lock(&ndev_hash_lock);
1938 hash_del_rcu(&pdata->ndev_hash_link);
1939 spin_unlock(&ndev_hash_lock);
1942 * If this is the last dev_put there is still a
1943 * synchronize_rcu before the netdev is kfreed, so we
1944 * can continue to rely on unlocked pointer
1945 * comparisons after the put
1947 rcu_assign_pointer(pdata->netdev, NULL);
1950 spin_unlock_irqrestore(&pdata->netdev_lock, flags);
1954 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
1957 struct ib_port_data *pdata;
1958 struct net_device *res;
1960 if (!rdma_is_port_valid(ib_dev, port))
1963 pdata = &ib_dev->port_data[port];
	/*
	 * New drivers should use ib_device_set_netdev() instead of the
	 * legacy get_netdev() callback.
	 */
1969 if (ib_dev->ops.get_netdev)
1970 res = ib_dev->ops.get_netdev(ib_dev, port);
1972 spin_lock(&pdata->netdev_lock);
1973 res = rcu_dereference_protected(
1974 pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
1977 spin_unlock(&pdata->netdev_lock);
1981 * If we are starting to unregister expedite things by preventing
1982 * propagation of an unregistering netdev.
1984 if (res && res->reg_state != NETREG_REGISTERED) {
1993 * ib_device_get_by_netdev - Find an IB device associated with a netdev
1994 * @ndev: netdev to locate
1995 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
1997 * Find and hold an ib_device that is associated with a netdev via
1998 * ib_device_set_netdev(). The caller must call ib_device_put() on the
2001 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
2002 enum rdma_driver_id driver_id)
2004 struct ib_device *res = NULL;
2005 struct ib_port_data *cur;
2008 hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
2010 if (rcu_access_pointer(cur->netdev) == ndev &&
2011 (driver_id == RDMA_DRIVER_UNKNOWN ||
2012 cur->ib_dev->driver_id == driver_id) &&
2013 ib_device_try_get(cur->ib_dev)) {
2022 EXPORT_SYMBOL(ib_device_get_by_netdev);
2025 * ib_enum_roce_netdev - enumerate all RoCE ports
2026 * @ib_dev : IB device we want to query
2027 * @filter: Should we call the callback?
2028 * @filter_cookie: Cookie passed to filter
2029 * @cb: Callback to call for each found RoCE ports
2030 * @cookie: Cookie passed back to the callback
2032 * Enumerates all of the physical RoCE ports of ib_dev
2033 * which are related to netdevice and calls callback() on each
2034 * device for which filter() function returns non zero.
2036 void ib_enum_roce_netdev(struct ib_device *ib_dev,
2037 roce_netdev_filter filter,
2038 void *filter_cookie,
2039 roce_netdev_callback cb,
2044 rdma_for_each_port (ib_dev, port)
2045 if (rdma_protocol_roce(ib_dev, port)) {
2046 struct net_device *idev =
2047 ib_device_get_netdev(ib_dev, port);
2049 if (filter(ib_dev, port, idev, filter_cookie))
2050 cb(ib_dev, port, idev, cookie);
2058 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
2059 * @filter: Should we call the callback?
2060 * @filter_cookie: Cookie passed to filter
2061 * @cb: Callback to call for each found RoCE ports
2062 * @cookie: Cookie passed back to the callback
2064 * Enumerates all RoCE devices' physical ports which are related
2065 * to netdevices and calls callback() on each device for which
2066 * filter() function returns non zero.
2068 void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
2069 void *filter_cookie,
2070 roce_netdev_callback cb,
2073 struct ib_device *dev;
2074 unsigned long index;
2076 down_read(&devices_rwsem);
2077 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED)
2078 ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
2079 up_read(&devices_rwsem);
2083 * ib_enum_all_devs - enumerate all ib_devices
2084 * @cb: Callback to call for each found ib_device
2086 * Enumerates all ib_devices and calls callback() on each device.
2088 int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
2089 struct netlink_callback *cb)
2091 unsigned long index;
2092 struct ib_device *dev;
2093 unsigned int idx = 0;
2096 down_read(&devices_rwsem);
2097 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
2098 if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
2101 ret = nldev_cb(dev, skb, cb, idx);
2106 up_read(&devices_rwsem);
2111 * ib_query_pkey - Get P_Key table entry
2112 * @device:Device to query
2113 * @port_num:Port number to query
2114 * @index:P_Key table index to query
2115 * @pkey:Returned P_Key
2117 * ib_query_pkey() fetches the specified P_Key table entry.
2119 int ib_query_pkey(struct ib_device *device,
2120 u8 port_num, u16 index, u16 *pkey)
2122 if (!rdma_is_port_valid(device, port_num))
2125 return device->ops.query_pkey(device, port_num, index, pkey);
2127 EXPORT_SYMBOL(ib_query_pkey);
2130 * ib_modify_device - Change IB device attributes
2131 * @device:Device to modify
2132 * @device_modify_mask:Mask of attributes to change
2133 * @device_modify:New attribute values
2135 * ib_modify_device() changes a device's attributes as specified by
2136 * the @device_modify_mask and @device_modify structure.
2138 int ib_modify_device(struct ib_device *device,
2139 int device_modify_mask,
2140 struct ib_device_modify *device_modify)
2142 if (!device->ops.modify_device)
2145 return device->ops.modify_device(device, device_modify_mask,
2148 EXPORT_SYMBOL(ib_modify_device);
2151 * ib_modify_port - Modifies the attributes for the specified port.
2152 * @device: The device to modify.
2153 * @port_num: The number of the port to modify.
2154 * @port_modify_mask: Mask used to specify which attributes of the port
2156 * @port_modify: New attribute values for the port.
2158 * ib_modify_port() changes a port's attributes as specified by the
2159 * @port_modify_mask and @port_modify structure.
2161 int ib_modify_port(struct ib_device *device,
2162 u8 port_num, int port_modify_mask,
2163 struct ib_port_modify *port_modify)
2167 if (!rdma_is_port_valid(device, port_num))
2170 if (device->ops.modify_port)
2171 rc = device->ops.modify_port(device, port_num,
2175 rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
2178 EXPORT_SYMBOL(ib_modify_port);
2181 * ib_find_gid - Returns the port number and GID table index where
 * a specified GID value occurs. It searches only for the IB link layer.
2183 * @device: The device to query.
2184 * @gid: The GID value to search for.
2185 * @port_num: The port number of the device where the GID value was found.
2186 * @index: The index into the GID table where the GID was found. This
2187 * parameter may be NULL.
2189 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2190 u8 *port_num, u16 *index)
2192 union ib_gid tmp_gid;
2196 rdma_for_each_port (device, port) {
2197 if (!rdma_protocol_ib(device, port))
2200 for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
2202 ret = rdma_query_gid(device, port, i, &tmp_gid);
2205 if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
2216 EXPORT_SYMBOL(ib_find_gid);
2219 * ib_find_pkey - Returns the PKey table index where a specified
2220 * PKey value occurs.
2221 * @device: The device to query.
2222 * @port_num: The port number of the device to search for the PKey.
2223 * @pkey: The PKey value to search for.
2224 * @index: The index into the PKey table where the PKey was found.
2226 int ib_find_pkey(struct ib_device *device,
2227 u8 port_num, u16 pkey, u16 *index)
2231 int partial_ix = -1;
2233 for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
2235 ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
2238 if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is a full-member pkey, take it */
2240 if (tmp_pkey & 0x8000) {
	/* no full member; if a limited member exists, take it */
2250 if (partial_ix >= 0) {
2251 *index = partial_ix;
2256 EXPORT_SYMBOL(ib_find_pkey);
2259 * ib_get_net_dev_by_params() - Return the appropriate net_dev
2260 * for a received CM request
2261 * @dev: An RDMA device on which the request has been received.
2262 * @port: Port number on the RDMA device.
2263 * @pkey: The Pkey the request came on.
2264 * @gid: A GID that the net_dev uses to communicate.
 * @addr: Contains the IP address that the request specified as its
 *	  destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	unsigned long index;
	void *client_data;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	/*
	 * Holding the read side guarantees that the client will not become
	 * unregistered while we are calling get_net_dev_by_params()
	 */
	down_read(&dev->client_data_rwsem);
	xan_for_each_marked (&dev->client_data, index, client_data,
			     CLIENT_DATA_REGISTERED) {
		struct ib_client *client = xa_load(&clients, index);

		if (!client || !client->get_net_dev_by_params)
			continue;

		net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
							addr, client_data);
		if (net_dev)
			break;
	}
	up_read(&dev->client_data_rwsem);

	return net_dev;
}
2303 EXPORT_SYMBOL(ib_get_net_dev_by_params);
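/*
 * Hedged note, not part of this file: only clients that populate
 * ->get_net_dev_by_params in their struct ib_client take part in the lookup
 * above.  A hypothetical client registration could look like:
 *
 *	static struct ib_client my_client = {
 *		.name = "my_client",
 *		.add = my_add_one,
 *		.remove = my_remove_one,
 *		.get_net_dev_by_params = my_get_net_dev,
 *	};
 *
 *	ib_register_client(&my_client);
 *
 * Callers of ib_get_net_dev_by_params() are expected to drop the returned
 * reference with dev_put() when they are done with the net_device.
 */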
void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
{
	struct ib_device_ops *dev_ops = &dev->ops;
#define SET_DEVICE_OP(ptr, name)                                               \
	do {                                                                   \
		if (ops->name)                                                 \
			if (!((ptr)->name))                                    \
				(ptr)->name = ops->name;                       \
	} while (0)
2315 #define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)
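/*
 * SET_OBJ_SIZE() reaches the size_* members of struct ib_device_ops via
 * token pasting: SET_OBJ_SIZE(dev_ops, ib_pd) behaves like
 * SET_DEVICE_OP(dev_ops, size_ib_pd), copying ops->size_ib_pd only when the
 * device has not already set it.
 */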
2317 SET_DEVICE_OP(dev_ops, add_gid);
2318 SET_DEVICE_OP(dev_ops, advise_mr);
2319 SET_DEVICE_OP(dev_ops, alloc_dm);
2320 SET_DEVICE_OP(dev_ops, alloc_fmr);
2321 SET_DEVICE_OP(dev_ops, alloc_hw_stats);
2322 SET_DEVICE_OP(dev_ops, alloc_mr);
2323 SET_DEVICE_OP(dev_ops, alloc_mw);
2324 SET_DEVICE_OP(dev_ops, alloc_pd);
2325 SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
2326 SET_DEVICE_OP(dev_ops, alloc_ucontext);
2327 SET_DEVICE_OP(dev_ops, alloc_xrcd);
2328 SET_DEVICE_OP(dev_ops, attach_mcast);
2329 SET_DEVICE_OP(dev_ops, check_mr_status);
2330 SET_DEVICE_OP(dev_ops, create_ah);
2331 SET_DEVICE_OP(dev_ops, create_counters);
2332 SET_DEVICE_OP(dev_ops, create_cq);
2333 SET_DEVICE_OP(dev_ops, create_flow);
2334 SET_DEVICE_OP(dev_ops, create_flow_action_esp);
2335 SET_DEVICE_OP(dev_ops, create_qp);
2336 SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
2337 SET_DEVICE_OP(dev_ops, create_srq);
2338 SET_DEVICE_OP(dev_ops, create_wq);
2339 SET_DEVICE_OP(dev_ops, dealloc_dm);
2340 SET_DEVICE_OP(dev_ops, dealloc_driver);
2341 SET_DEVICE_OP(dev_ops, dealloc_fmr);
2342 SET_DEVICE_OP(dev_ops, dealloc_mw);
2343 SET_DEVICE_OP(dev_ops, dealloc_pd);
2344 SET_DEVICE_OP(dev_ops, dealloc_ucontext);
2345 SET_DEVICE_OP(dev_ops, dealloc_xrcd);
2346 SET_DEVICE_OP(dev_ops, del_gid);
2347 SET_DEVICE_OP(dev_ops, dereg_mr);
2348 SET_DEVICE_OP(dev_ops, destroy_ah);
2349 SET_DEVICE_OP(dev_ops, destroy_counters);
2350 SET_DEVICE_OP(dev_ops, destroy_cq);
2351 SET_DEVICE_OP(dev_ops, destroy_flow);
2352 SET_DEVICE_OP(dev_ops, destroy_flow_action);
2353 SET_DEVICE_OP(dev_ops, destroy_qp);
2354 SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
2355 SET_DEVICE_OP(dev_ops, destroy_srq);
2356 SET_DEVICE_OP(dev_ops, destroy_wq);
2357 SET_DEVICE_OP(dev_ops, detach_mcast);
2358 SET_DEVICE_OP(dev_ops, disassociate_ucontext);
2359 SET_DEVICE_OP(dev_ops, drain_rq);
2360 SET_DEVICE_OP(dev_ops, drain_sq);
2361 SET_DEVICE_OP(dev_ops, enable_driver);
2362 SET_DEVICE_OP(dev_ops, fill_res_entry);
2363 SET_DEVICE_OP(dev_ops, get_dev_fw_str);
2364 SET_DEVICE_OP(dev_ops, get_dma_mr);
2365 SET_DEVICE_OP(dev_ops, get_hw_stats);
2366 SET_DEVICE_OP(dev_ops, get_link_layer);
2367 SET_DEVICE_OP(dev_ops, get_netdev);
2368 SET_DEVICE_OP(dev_ops, get_port_immutable);
2369 SET_DEVICE_OP(dev_ops, get_vector_affinity);
2370 SET_DEVICE_OP(dev_ops, get_vf_config);
2371 SET_DEVICE_OP(dev_ops, get_vf_stats);
2372 SET_DEVICE_OP(dev_ops, init_port);
2373 SET_DEVICE_OP(dev_ops, iw_accept);
2374 SET_DEVICE_OP(dev_ops, iw_add_ref);
2375 SET_DEVICE_OP(dev_ops, iw_connect);
2376 SET_DEVICE_OP(dev_ops, iw_create_listen);
2377 SET_DEVICE_OP(dev_ops, iw_destroy_listen);
2378 SET_DEVICE_OP(dev_ops, iw_get_qp);
2379 SET_DEVICE_OP(dev_ops, iw_reject);
2380 SET_DEVICE_OP(dev_ops, iw_rem_ref);
2381 SET_DEVICE_OP(dev_ops, map_mr_sg);
2382 SET_DEVICE_OP(dev_ops, map_phys_fmr);
2383 SET_DEVICE_OP(dev_ops, mmap);
2384 SET_DEVICE_OP(dev_ops, modify_ah);
2385 SET_DEVICE_OP(dev_ops, modify_cq);
2386 SET_DEVICE_OP(dev_ops, modify_device);
2387 SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
2388 SET_DEVICE_OP(dev_ops, modify_port);
2389 SET_DEVICE_OP(dev_ops, modify_qp);
2390 SET_DEVICE_OP(dev_ops, modify_srq);
2391 SET_DEVICE_OP(dev_ops, modify_wq);
2392 SET_DEVICE_OP(dev_ops, peek_cq);
2393 SET_DEVICE_OP(dev_ops, poll_cq);
2394 SET_DEVICE_OP(dev_ops, post_recv);
2395 SET_DEVICE_OP(dev_ops, post_send);
2396 SET_DEVICE_OP(dev_ops, post_srq_recv);
2397 SET_DEVICE_OP(dev_ops, process_mad);
2398 SET_DEVICE_OP(dev_ops, query_ah);
2399 SET_DEVICE_OP(dev_ops, query_device);
2400 SET_DEVICE_OP(dev_ops, query_gid);
2401 SET_DEVICE_OP(dev_ops, query_pkey);
2402 SET_DEVICE_OP(dev_ops, query_port);
2403 SET_DEVICE_OP(dev_ops, query_qp);
2404 SET_DEVICE_OP(dev_ops, query_srq);
2405 SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
2406 SET_DEVICE_OP(dev_ops, read_counters);
2407 SET_DEVICE_OP(dev_ops, reg_dm_mr);
2408 SET_DEVICE_OP(dev_ops, reg_user_mr);
2409 SET_DEVICE_OP(dev_ops, req_ncomp_notif);
2410 SET_DEVICE_OP(dev_ops, req_notify_cq);
2411 SET_DEVICE_OP(dev_ops, rereg_user_mr);
2412 SET_DEVICE_OP(dev_ops, resize_cq);
2413 SET_DEVICE_OP(dev_ops, set_vf_guid);
2414 SET_DEVICE_OP(dev_ops, set_vf_link_state);
2415 SET_DEVICE_OP(dev_ops, unmap_fmr);
2417 SET_OBJ_SIZE(dev_ops, ib_ah);
2418 SET_OBJ_SIZE(dev_ops, ib_pd);
2419 SET_OBJ_SIZE(dev_ops, ib_srq);
2420 SET_OBJ_SIZE(dev_ops, ib_ucontext);
2422 EXPORT_SYMBOL(ib_set_device_ops);
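/*
 * Hedged usage sketch, not part of this file: a provider driver (the mydrv_*
 * names below are hypothetical) typically declares a const ops table and
 * installs it once before ib_register_device().  Because SET_DEVICE_OP()
 * only fills callbacks that are still unset, several tables can be layered
 * without overwriting earlier assignments:
 *
 *	static const struct ib_device_ops mydrv_dev_ops = {
 *		.query_device = mydrv_query_device,
 *		.query_port = mydrv_query_port,
 *		.alloc_pd = mydrv_alloc_pd,
 *		.dealloc_pd = mydrv_dealloc_pd,
 *		INIT_RDMA_OBJ_SIZE(ib_pd, mydrv_pd, ibpd),
 *	};
 *
 *	ib_set_device_ops(ibdev, &mydrv_dev_ops);
 */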
static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.doit = ib_nl_handle_resolve_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.doit = ib_nl_handle_set_timeout,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.doit = ib_nl_handle_ip_res_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};
static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
2447 ib_comp_wq = alloc_workqueue("ib-comp-wq",
2448 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
2454 ib_comp_unbound_wq =
2455 alloc_workqueue("ib-comp-unb-wq",
2456 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
2457 WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
2458 if (!ib_comp_unbound_wq) {
2463 ret = class_register(&ib_class);
2465 pr_warn("Couldn't create InfiniBand device class\n");
2466 goto err_comp_unbound;
2469 ret = rdma_nl_init();
2471 pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
2477 pr_warn("Could't init IB address resolution\n");
2481 ret = ib_mad_init();
2483 pr_warn("Couldn't init IB MAD\n");
2489 pr_warn("Couldn't init SA\n");
2493 ret = register_lsm_notifier(&ibdev_lsm_nb);
2495 pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
2499 ret = register_pernet_device(&rdma_dev_net_ops);
2501 pr_warn("Couldn't init compat dev. ret %d\n", ret);
2506 rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
2507 roce_gid_mgmt_init();
2512 unregister_lsm_notifier(&ibdev_lsm_nb);
2522 class_unregister(&ib_class);
2524 destroy_workqueue(ib_comp_unbound_wq);
2526 destroy_workqueue(ib_comp_wq);
2528 destroy_workqueue(ib_wq);
static void __exit ib_core_cleanup(void)
{
	roce_gid_mgmt_cleanup();
	nldev_exit();
	rdma_nl_unregister(RDMA_NL_LS);
	unregister_pernet_device(&rdma_dev_net_ops);
	unregister_lsm_notifier(&ibdev_lsm_nb);
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	rdma_nl_exit();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_unbound_wq);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
	flush_workqueue(system_unbound_wq);
	WARN_ON(!xa_empty(&clients));
	WARN_ON(!xa_empty(&devices));
}
2553 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
/* ib core relies on the netdev stack registering the net_ns_type_operations
 * ns kobject type before ib_core initialization.
 */
2558 fs_initcall(ib_core_init);
2559 module_exit(ib_core_cleanup);