/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>
#include <rdma/uverbs_std_types.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");

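/*
 * Descriptive note: the first IB_UVERBS_NUM_FIXED_MINOR devices use the
 * historical fixed char device region (major IB_UVERBS_MAJOR, minors from
 * IB_UVERBS_BASE_MINOR); any devices beyond that fall into the dynamically
 * allocated region tracked by dynamic_uverbs_dev below.
 */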
enum {
        IB_UVERBS_MAJOR       = 231,
        IB_UVERBS_BASE_MINOR  = 192,
        IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
        IB_UVERBS_NUM_FIXED_MINOR = 32,
        IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR,
};

#define IB_UVERBS_BASE_DEV      MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;

static DEFINE_IDA(uverbs_ida);
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);

/*
 * Must be called with the ufile->device->disassociate_srcu held, and the lock
 * must be held until use of the ucontext is finished.
 */
struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile)
{
        /*
         * We do not hold the hw_destroy_rwsem lock for this flow; instead,
         * SRCU is used. It does not matter if someone races this with
         * get_context, we get either NULL or a valid ucontext.
         */
        struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

        if (!srcu_dereference(ufile->device->ib_dev,
                              &ufile->device->disassociate_srcu))
                return ERR_PTR(-EIO);

        if (!ucontext)
                return ERR_PTR(-EINVAL);

        return ucontext;
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext);
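
/*
 * A sketch of the expected calling pattern (ib_uverbs_mmap() below is a
 * real example):
 *
 *      srcu_key = srcu_read_lock(&ufile->device->disassociate_srcu);
 *      ucontext = ib_uverbs_get_ucontext(ufile);
 *      if (!IS_ERR(ucontext))
 *              ... use ucontext ...
 *      srcu_read_unlock(&ufile->device->disassociate_srcu, srcu_key);
 */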

int uverbs_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd = mw->pd;
        int ret;

        ret = mw->device->dealloc_mw(mw);
        if (!ret)
                atomic_dec(&pd->usecnt);
        return ret;
}

static void ib_uverbs_release_dev(struct device *device)
{
        struct ib_uverbs_device *dev =
                        container_of(device, struct ib_uverbs_device, dev);

        uverbs_destroy_api(dev->uapi);
        cleanup_srcu_struct(&dev->disassociate_srcu);
        kfree(dev);
}

static void ib_uverbs_release_async_event_file(struct kref *ref)
{
        struct ib_uverbs_async_event_file *file =
                container_of(ref, struct ib_uverbs_async_event_file, ref);

        kfree(file);
}

void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
                          struct ib_uverbs_completion_event_file *ev_file,
                          struct ib_ucq_object *uobj)
{
        struct ib_uverbs_event *evt, *tmp;

        if (ev_file) {
                spin_lock_irq(&ev_file->ev_queue.lock);
                list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
                        list_del(&evt->list);
                        kfree(evt);
                }
                spin_unlock_irq(&ev_file->ev_queue.lock);

                uverbs_uobject_put(&ev_file->uobj);
        }

        spin_lock_irq(&file->async_file->ev_queue.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file->ev_queue.lock);
}

void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
                              struct ib_uevent_object *uobj)
{
        struct ib_uverbs_event *evt, *tmp;

        spin_lock_irq(&file->async_file->ev_queue.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file->ev_queue.lock);
}

void ib_uverbs_detach_umcast(struct ib_qp *qp,
                             struct ib_uqp_object *uobj)
{
        struct ib_uverbs_mcast_entry *mcast, *tmp;

        list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
                ib_detach_mcast(qp, &mcast->gid, mcast->lid);
                list_del(&mcast->list);
                kfree(mcast);
        }
}

static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
        complete(&dev->comp);
}

void ib_uverbs_release_file(struct kref *ref)
{
        struct ib_uverbs_file *file =
                container_of(ref, struct ib_uverbs_file, ref);
        struct ib_device *ib_dev;
        int srcu_key;

        release_ufile_idr_uobject(file);

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ib_dev = srcu_dereference(file->device->ib_dev,
                                  &file->device->disassociate_srcu);
        if (ib_dev && !ib_dev->disassociate_ucontext)
                module_put(ib_dev->owner);
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

        if (atomic_dec_and_test(&file->device->refcount))
                ib_uverbs_comp_dev(file->device);

        put_device(&file->device->dev);
        kfree(file);
}

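/*
 * Copy a single event of @eventsz bytes to userspace. Blocks until an event
 * is available unless O_NONBLOCK is set, and returns -EIO if the queue is
 * empty after the underlying device has been disassociated.
 */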
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
                                    struct ib_uverbs_file *uverbs_file,
                                    struct file *filp, char __user *buf,
                                    size_t count, loff_t *pos,
                                    size_t eventsz)
{
        struct ib_uverbs_event *event;
        int ret = 0;

        spin_lock_irq(&ev_queue->lock);

        while (list_empty(&ev_queue->event_list)) {
                spin_unlock_irq(&ev_queue->lock);

                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(ev_queue->poll_wait,
                                             (!list_empty(&ev_queue->event_list) ||
                        /* The barriers built into wait_event_interruptible()
                         * and wake_up() guarantee this will see the null set
                         * without using RCU
                         */
                                             !uverbs_file->device->ib_dev)))
                        return -ERESTARTSYS;

                /* If the device was disassociated and no event exists, set an error */
                if (list_empty(&ev_queue->event_list) &&
                    !uverbs_file->device->ib_dev)
                        return -EIO;

                spin_lock_irq(&ev_queue->lock);
        }

        event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

        if (eventsz > count) {
                ret   = -EINVAL;
                event = NULL;
        } else {
                list_del(ev_queue->event_list.next);
                if (event->counter) {
                        ++(*event->counter);
                        list_del(&event->obj_list);
                }
        }

        spin_unlock_irq(&ev_queue->lock);

        if (event) {
                if (copy_to_user(buf, event, eventsz))
                        ret = -EFAULT;
                else
                        ret = eventsz;
        }

        kfree(event);

        return ret;
}

static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
                                          size_t count, loff_t *pos)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;

        return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
                                    buf, count, pos,
                                    sizeof(struct ib_uverbs_async_event_desc));
}

static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
                                         size_t count, loff_t *pos)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return ib_uverbs_event_read(&comp_ev_file->ev_queue,
                                    comp_ev_file->uobj.ufile, filp,
                                    buf, count, pos,
                                    sizeof(struct ib_uverbs_comp_event_desc));
}

static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
                                         struct file *filp,
                                         struct poll_table_struct *wait)
{
        __poll_t pollflags = 0;

        poll_wait(filp, &ev_queue->poll_wait, wait);

        spin_lock_irq(&ev_queue->lock);
        if (!list_empty(&ev_queue->event_list))
                pollflags = EPOLLIN | EPOLLRDNORM;
        spin_unlock_irq(&ev_queue->lock);

        return pollflags;
}

static __poll_t ib_uverbs_async_event_poll(struct file *filp,
                                               struct poll_table_struct *wait)
{
        return ib_uverbs_event_poll(filp->private_data, filp, wait);
}

static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
                                              struct poll_table_struct *wait)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}

static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
        struct ib_uverbs_event_queue *ev_queue = filp->private_data;

        return fasync_helper(fd, filp, on, &ev_queue->async_queue);
}

static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}

static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;
        struct ib_uverbs_file *uverbs_file = file->uverbs_file;
        struct ib_uverbs_event *entry, *tmp;
        int closed_already = 0;

        mutex_lock(&uverbs_file->device->lists_mutex);
        spin_lock_irq(&file->ev_queue.lock);
        closed_already = file->ev_queue.is_closed;
        file->ev_queue.is_closed = 1;
        list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
                if (entry->counter)
                        list_del(&entry->obj_list);
                kfree(entry);
        }
        spin_unlock_irq(&file->ev_queue.lock);
        if (!closed_already) {
                list_del(&file->list);
                ib_unregister_event_handler(&uverbs_file->event_handler);
        }
        mutex_unlock(&uverbs_file->device->lists_mutex);

        kref_put(&uverbs_file->ref, ib_uverbs_release_file);
        kref_put(&file->ref, ib_uverbs_release_async_event_file);

        return 0;
}

static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
{
        struct ib_uobject *uobj = filp->private_data;
        struct ib_uverbs_completion_event_file *file = container_of(
                uobj, struct ib_uverbs_completion_event_file, uobj);
        struct ib_uverbs_event *entry, *tmp;

        spin_lock_irq(&file->ev_queue.lock);
        list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
                if (entry->counter)
                        list_del(&entry->obj_list);
                kfree(entry);
        }
        file->ev_queue.is_closed = 1;
        spin_unlock_irq(&file->ev_queue.lock);

        uverbs_close_fd(filp);

        return 0;
}

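/*
 * Note that completion event files (completion channels) are created by
 * userspace as needed and use uverbs_event_fops, while the single async
 * event file for a context is allocated by
 * ib_uverbs_alloc_async_event_file() below.
 */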
const struct file_operations uverbs_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_comp_event_read,
        .poll    = ib_uverbs_comp_event_poll,
        .release = ib_uverbs_comp_event_close,
        .fasync  = ib_uverbs_comp_event_fasync,
        .llseek  = no_llseek,
};

static const struct file_operations uverbs_async_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_async_event_read,
        .poll    = ib_uverbs_async_event_poll,
        .release = ib_uverbs_async_event_close,
        .fasync  = ib_uverbs_async_event_fasync,
        .llseek  = no_llseek,
};

void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct ib_uverbs_event_queue   *ev_queue = cq_context;
        struct ib_ucq_object           *uobj;
        struct ib_uverbs_event         *entry;
        unsigned long                   flags;

        if (!ev_queue)
                return;

        spin_lock_irqsave(&ev_queue->lock, flags);
        if (ev_queue->is_closed) {
                spin_unlock_irqrestore(&ev_queue->lock, flags);
                return;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&ev_queue->lock, flags);
                return;
        }

        uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

        entry->desc.comp.cq_handle = cq->uobject->user_handle;
        entry->counter             = &uobj->comp_events_reported;

        list_add_tail(&entry->list, &ev_queue->event_list);
        list_add_tail(&entry->obj_list, &uobj->comp_list);
        spin_unlock_irqrestore(&ev_queue->lock, flags);

        wake_up_interruptible(&ev_queue->poll_wait);
        kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}

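/*
 * Queue an async event on the file's async event queue and wake any pollers.
 * When @obj_list/@counter are non-NULL they link the event to its owning
 * uobject so that undelivered events can be reaped at object destruction
 * (see ib_uverbs_release_uevent() above).
 */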
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
                                    __u64 element, __u64 event,
                                    struct list_head *obj_list,
                                    u32 *counter)
{
        struct ib_uverbs_event *entry;
        unsigned long flags;

        spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
        if (file->async_file->ev_queue.is_closed) {
                spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
                return;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
                return;
        }

        entry->desc.async.element    = element;
        entry->desc.async.event_type = event;
        entry->desc.async.reserved   = 0;
        entry->counter               = counter;

        list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
        if (obj_list)
                list_add_tail(&entry->obj_list, obj_list);
        spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);

        wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
        kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}

void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
                                                  struct ib_ucq_object, uobject);

        ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
                                event->event, &uobj->async_list,
                                &uobj->async_events_reported);
}

void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_uevent_object *uobj;

        /* for XRC target QPs, check that the qp is live */
        if (!event->element.qp->uobject)
                return;

        uobj = container_of(event->element.qp->uobject,
                            struct ib_uevent_object, uobject);

        ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
                                event->event, &uobj->event_list,
                                &uobj->events_reported);
}

void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
                                                  struct ib_uevent_object, uobject);

        ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
                                event->event, &uobj->event_list,
                                &uobj->events_reported);
}

void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_uevent_object *uobj;

        uobj = container_of(event->element.srq->uobject,
                            struct ib_uevent_object, uobject);

        ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
                                event->event, &uobj->event_list,
                                &uobj->events_reported);
}

void ib_uverbs_event_handler(struct ib_event_handler *handler,
                             struct ib_event *event)
{
        struct ib_uverbs_file *file =
                container_of(handler, struct ib_uverbs_file, event_handler);

        ib_uverbs_async_handler(file, event->element.port_num, event->event,
                                NULL, NULL);
}

void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
        kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file);
        file->async_file = NULL;
}

void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
        spin_lock_init(&ev_queue->lock);
        INIT_LIST_HEAD(&ev_queue->event_list);
        init_waitqueue_head(&ev_queue->poll_wait);
        ev_queue->is_closed   = 0;
        ev_queue->async_queue = NULL;
}

struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
                                              struct ib_device  *ib_dev)
{
        struct ib_uverbs_async_event_file *ev_file;
        struct file *filp;

        ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
        if (!ev_file)
                return ERR_PTR(-ENOMEM);

        ib_uverbs_init_event_queue(&ev_file->ev_queue);
        ev_file->uverbs_file = uverbs_file;
        kref_get(&ev_file->uverbs_file->ref);
        kref_init(&ev_file->ref);
        filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
                                  ev_file, O_RDONLY);
        if (IS_ERR(filp))
                goto err_put_refs;

        mutex_lock(&uverbs_file->device->lists_mutex);
        list_add_tail(&ev_file->list,
                      &uverbs_file->device->uverbs_events_file_list);
        mutex_unlock(&uverbs_file->device->lists_mutex);

        WARN_ON(uverbs_file->async_file);
        uverbs_file->async_file = ev_file;
        kref_get(&uverbs_file->async_file->ref);
        INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
                              ib_dev,
                              ib_uverbs_event_handler);
        ib_register_event_handler(&uverbs_file->event_handler);
        /* At this point the async event file is fully set up */

        return filp;

err_put_refs:
        kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
        kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
        return filp;
}

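/*
 * Command buffers are sized in words: legacy commands count the whole
 * buffer, header included, in 4-byte words, while extended commands count
 * only the payload following both headers, in 8-byte words.
 */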
static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
                          struct ib_uverbs_ex_cmd_hdr *ex_hdr,
                          size_t count, bool extended)
{
        if (extended) {
                count -= sizeof(*hdr) + sizeof(*ex_hdr);

                if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
                        return -EINVAL;

                if (ex_hdr->cmd_hdr_reserved)
                        return -EINVAL;

                if (ex_hdr->response) {
                        if (!hdr->out_words && !ex_hdr->provider_out_words)
                                return -EINVAL;

                        if (!access_ok(VERIFY_WRITE,
                                       u64_to_user_ptr(ex_hdr->response),
                                       (hdr->out_words + ex_hdr->provider_out_words) * 8))
                                return -EFAULT;
                } else {
                        if (hdr->out_words || ex_hdr->provider_out_words)
                                return -EINVAL;
                }

                return 0;
        }

        /* not extended command */
        if (hdr->in_words * 4 != count)
                return -EINVAL;

        return 0;
}

static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
{
        struct ib_uverbs_file *file = filp->private_data;
        const struct uverbs_api_write_method *method_elm;
        struct uverbs_api *uapi = file->device->uapi;
        struct ib_uverbs_ex_cmd_hdr ex_hdr;
        struct ib_uverbs_cmd_hdr hdr;
        int srcu_key;
        ssize_t ret;

        if (!ib_safe_file_access(filp)) {
                pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
                            task_tgid_vnr(current), current->comm);
                return -EACCES;
        }

        if (count < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        method_elm = uapi_get_method(uapi, hdr.command);
        if (IS_ERR(method_elm))
                return PTR_ERR(method_elm);

        if (method_elm->is_ex) {
                if (count < (sizeof(hdr) + sizeof(ex_hdr)))
                        return -EINVAL;
                if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
                        return -EFAULT;
        }

        ret = verify_hdr(&hdr, &ex_hdr, count, method_elm->is_ex);
        if (ret)
                return ret;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);

        buf += sizeof(hdr);

        if (!method_elm->is_ex) {
                ret = method_elm->handler(file, buf, hdr.in_words * 4,
                                          hdr.out_words * 4);
        } else {
                struct ib_udata ucore;
                struct ib_udata uhw;

                buf += sizeof(ex_hdr);

                ib_uverbs_init_udata_buf_or_null(&ucore, buf,
                                        u64_to_user_ptr(ex_hdr.response),
                                        hdr.in_words * 8, hdr.out_words * 8);

                ib_uverbs_init_udata_buf_or_null(&uhw,
                                        buf + ucore.inlen,
                                        u64_to_user_ptr(ex_hdr.response) + ucore.outlen,
                                        ex_hdr.provider_in_words * 8,
                                        ex_hdr.provider_out_words * 8);

                ret = method_elm->handler_ex(file, &ucore, &uhw);
                ret = (ret) ? : count;
        }

        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
        return ret;
}

static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct ib_uverbs_file *file = filp->private_data;
        struct ib_ucontext *ucontext;
        int ret = 0;
        int srcu_key;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ucontext = ib_uverbs_get_ucontext(file);
        if (IS_ERR(ucontext)) {
                ret = PTR_ERR(ucontext);
                goto out;
        }

        ret = ucontext->device->mmap(ucontext, vma);
out:
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
        return ret;
}

/*
 * Each time we map IO memory into user space this keeps track of the mapping.
 * When the device is hot-unplugged we 'zap' the mmaps in user space to point
 * to the zero page and allow the hot unplug to proceed.
 *
 * This is necessary for cases like PCI physical hot unplug as the actual BAR
 * memory may vanish after this and access to it from userspace could MCE.
 *
 * RDMA drivers supporting disassociation must have their user space designed
 * to cope in some way with their IO pages going to the zero page.
 */
struct rdma_umap_priv {
        struct vm_area_struct *vma;
        struct list_head list;
};

static const struct vm_operations_struct rdma_umap_ops;

static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
                                struct vm_area_struct *vma)
{
        struct ib_uverbs_file *ufile = vma->vm_file->private_data;

        priv->vma = vma;
        vma->vm_private_data = priv;
        vma->vm_ops = &rdma_umap_ops;

        mutex_lock(&ufile->umap_lock);
        list_add(&priv->list, &ufile->umaps);
        mutex_unlock(&ufile->umap_lock);
}

/*
 * The VMA has been dup'd, initialize the vm_private_data with a new tracking
 * struct
 */
static void rdma_umap_open(struct vm_area_struct *vma)
{
        struct ib_uverbs_file *ufile = vma->vm_file->private_data;
        struct rdma_umap_priv *opriv = vma->vm_private_data;
        struct rdma_umap_priv *priv;

        if (!opriv)
                return;

        /* We are racing with disassociation */
        if (!down_read_trylock(&ufile->hw_destroy_rwsem))
                goto out_zap;
        /*
         * Disassociation already completed, the VMA should already be zapped.
         */
        if (!ufile->ucontext)
                goto out_unlock;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto out_unlock;
        rdma_umap_priv_init(priv, vma);

        up_read(&ufile->hw_destroy_rwsem);
        return;

out_unlock:
        up_read(&ufile->hw_destroy_rwsem);
out_zap:
        /*
         * We can't allow the VMA to be created with the actual IO pages, that
         * would break our API contract, and it can't be stopped at this
         * point, so zap it.
         */
        vma->vm_private_data = NULL;
        zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}

static void rdma_umap_close(struct vm_area_struct *vma)
{
        struct ib_uverbs_file *ufile = vma->vm_file->private_data;
        struct rdma_umap_priv *priv = vma->vm_private_data;

        if (!priv)
                return;

        /*
         * The vma holds a reference on the struct file that created it, which
         * in turn means that the ib_uverbs_file is guaranteed to exist at
         * this point.
         */
        mutex_lock(&ufile->umap_lock);
        list_del(&priv->list);
        mutex_unlock(&ufile->umap_lock);
        kfree(priv);
}

static const struct vm_operations_struct rdma_umap_ops = {
        .open = rdma_umap_open,
        .close = rdma_umap_close,
};

static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
                                                 struct vm_area_struct *vma,
                                                 unsigned long size)
{
        struct ib_uverbs_file *ufile = ucontext->ufile;
        struct rdma_umap_priv *priv;

        if (vma->vm_end - vma->vm_start != size)
                return ERR_PTR(-EINVAL);

        /* Driver is using this wrong, must be called by ib_uverbs_mmap */
        if (WARN_ON(!vma->vm_file ||
                    vma->vm_file->private_data != ufile))
                return ERR_PTR(-EINVAL);
        lockdep_assert_held(&ufile->device->disassociate_srcu);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return ERR_PTR(-ENOMEM);
        return priv;
}

/*
 * Map IO memory into a process. This is to be called by drivers as part of
 * their mmap() functions if they wish to send something like PCI-E BAR memory
 * to userspace.
 */
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
                      unsigned long pfn, unsigned long size, pgprot_t prot)
{
        struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

        if (IS_ERR(priv))
                return PTR_ERR(priv);

        vma->vm_page_prot = prot;
        if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
                kfree(priv);
                return -EAGAIN;
        }

        rdma_umap_priv_init(priv, vma);
        return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_io);
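
/*
 * A minimal sketch of a driver mmap hook built on this helper; the driver
 * name and the offset-to-pfn translation are hypothetical:
 *
 *      static int hypo_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
 *      {
 *              unsigned long pfn = hypo_doorbell_pfn(uctx, vma->vm_pgoff);
 *
 *              return rdma_user_mmap_io(uctx, vma, pfn, PAGE_SIZE,
 *                                       pgprot_noncached(vma->vm_page_prot));
 *      }
 */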

/*
 * The page case is here for a slightly different reason, the driver expects
 * to be able to free the page it is sharing to user space when it destroys
 * its ucontext, which means we need to zap the user space references.
 *
 * We could handle this differently by providing an API to allocate a shared
 * page and then only freeing the shared page when the last ufile is
 * destroyed.
 */
int rdma_user_mmap_page(struct ib_ucontext *ucontext,
                        struct vm_area_struct *vma, struct page *page,
                        unsigned long size)
{
        struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

        if (IS_ERR(priv))
                return PTR_ERR(priv);

        if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
                            vma->vm_page_prot)) {
                kfree(priv);
                return -EAGAIN;
        }

        rdma_umap_priv_init(priv, vma);
        return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_page);

void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
        struct rdma_umap_priv *priv, *next_priv;

        lockdep_assert_held(&ufile->hw_destroy_rwsem);

        while (1) {
                struct mm_struct *mm = NULL;

                /* Get an arbitrary mm pointer that hasn't been cleaned yet */
                mutex_lock(&ufile->umap_lock);
                if (!list_empty(&ufile->umaps)) {
                        mm = list_first_entry(&ufile->umaps,
                                              struct rdma_umap_priv, list)
                                     ->vma->vm_mm;
                        mmget(mm);
                }
                mutex_unlock(&ufile->umap_lock);
                if (!mm)
                        return;

                /*
                 * The umap_lock is nested under mmap_sem since it is used
                 * within the vma_ops callbacks, so we have to clean the list
                 * one mm at a time to get the lock ordering right. Typically
                 * there will only be one mm, so no big deal.
                 */
                down_write(&mm->mmap_sem);
                mutex_lock(&ufile->umap_lock);
                list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
                                          list) {
                        struct vm_area_struct *vma = priv->vma;

                        if (vma->vm_mm != mm)
                                continue;
                        list_del_init(&priv->list);

                        zap_vma_ptes(vma, vma->vm_start,
                                     vma->vm_end - vma->vm_start);
                        vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
                }
                mutex_unlock(&ufile->umap_lock);
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
}

/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately fail with -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_device *dev;
        struct ib_uverbs_file *file;
        struct ib_device *ib_dev;
        int ret;
        int module_dependent;
        int srcu_key;

        dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
        if (!atomic_inc_not_zero(&dev->refcount))
                return -ENXIO;

        get_device(&dev->dev);
        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        mutex_lock(&dev->lists_mutex);
        ib_dev = srcu_dereference(dev->ib_dev,
                                  &dev->disassociate_srcu);
        if (!ib_dev) {
                ret = -EIO;
                goto err;
        }

        /* If the IB device supports disassociating the ucontext, there is no
         * hard module dependency between the uverbs device and its low-level
         * device.
         */
        module_dependent = !(ib_dev->disassociate_ucontext);

        if (module_dependent) {
                if (!try_module_get(ib_dev->owner)) {
                        ret = -ENODEV;
                        goto err;
                }
        }

        file = kzalloc(sizeof(*file), GFP_KERNEL);
        if (!file) {
                ret = -ENOMEM;
                if (module_dependent)
                        goto err_module;

                goto err;
        }

        file->device     = dev;
        kref_init(&file->ref);
        mutex_init(&file->ucontext_lock);

        spin_lock_init(&file->uobjects_lock);
        INIT_LIST_HEAD(&file->uobjects);
        init_rwsem(&file->hw_destroy_rwsem);
        mutex_init(&file->umap_lock);
        INIT_LIST_HEAD(&file->umaps);

        filp->private_data = file;
        list_add_tail(&file->list, &dev->uverbs_file_list);
        mutex_unlock(&dev->lists_mutex);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        setup_ufile_idr_uobject(file);

        return nonseekable_open(inode, filp);

err_module:
        module_put(ib_dev->owner);

err:
        mutex_unlock(&dev->lists_mutex);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
        if (atomic_dec_and_test(&dev->refcount))
                ib_uverbs_comp_dev(dev);

        put_device(&dev->dev);
        return ret;
}

static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_file *file = filp->private_data;

        uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

        mutex_lock(&file->device->lists_mutex);
        list_del_init(&file->list);
        mutex_unlock(&file->device->lists_mutex);

        if (file->async_file)
                kref_put(&file->async_file->ref,
                         ib_uverbs_release_async_event_file);

        kref_put(&file->ref, ib_uverbs_release_file);

        return 0;
}

static const struct file_operations uverbs_fops = {
        .owner   = THIS_MODULE,
        .write   = ib_uverbs_write,
        .open    = ib_uverbs_open,
        .release = ib_uverbs_close,
        .llseek  = no_llseek,
        .unlocked_ioctl = ib_uverbs_ioctl,
        .compat_ioctl = ib_uverbs_ioctl,
};

static const struct file_operations uverbs_mmap_fops = {
        .owner   = THIS_MODULE,
        .write   = ib_uverbs_write,
        .mmap    = ib_uverbs_mmap,
        .open    = ib_uverbs_open,
        .release = ib_uverbs_close,
        .llseek  = no_llseek,
        .unlocked_ioctl = ib_uverbs_ioctl,
        .compat_ioctl = ib_uverbs_ioctl,
};

static struct ib_client uverbs_client = {
        .name   = "uverbs",
        .add    = ib_uverbs_add_one,
        .remove = ib_uverbs_remove_one
};

static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct ib_uverbs_device *dev =
                        container_of(device, struct ib_uverbs_device, dev);
        int ret = -ENODEV;
        int srcu_key;
        struct ib_device *ib_dev;

        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
        if (ib_dev)
                ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return ret;
}
static DEVICE_ATTR_RO(ibdev);

static ssize_t abi_version_show(struct device *device,
                                struct device_attribute *attr, char *buf)
{
        struct ib_uverbs_device *dev =
                        container_of(device, struct ib_uverbs_device, dev);
        int ret = -ENODEV;
        int srcu_key;
        struct ib_device *ib_dev;

        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
        if (ib_dev)
                ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return ret;
}
static DEVICE_ATTR_RO(abi_version);

static struct attribute *ib_dev_attrs[] = {
        &dev_attr_abi_version.attr,
        &dev_attr_ibdev.attr,
        NULL,
};

static const struct attribute_group dev_attr_group = {
        .attrs = ib_dev_attrs,
};

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
                         __stringify(IB_USER_VERBS_ABI_VERSION));

static int ib_uverbs_create_uapi(struct ib_device *device,
                                 struct ib_uverbs_device *uverbs_dev)
{
        struct uverbs_api *uapi;

        uapi = uverbs_alloc_api(device);
        if (IS_ERR(uapi))
                return PTR_ERR(uapi);

        uverbs_dev->uapi = uapi;
        return 0;
}

static void ib_uverbs_add_one(struct ib_device *device)
{
        int devnum;
        dev_t base;
        struct ib_uverbs_device *uverbs_dev;
        int ret;

        if (!device->alloc_ucontext)
                return;

        uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
        if (!uverbs_dev)
                return;

        ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
        if (ret) {
                kfree(uverbs_dev);
                return;
        }

        device_initialize(&uverbs_dev->dev);
        uverbs_dev->dev.class = uverbs_class;
        uverbs_dev->dev.parent = device->dev.parent;
        uverbs_dev->dev.release = ib_uverbs_release_dev;
        uverbs_dev->groups[0] = &dev_attr_group;
        uverbs_dev->dev.groups = uverbs_dev->groups;
        atomic_set(&uverbs_dev->refcount, 1);
        init_completion(&uverbs_dev->comp);
        uverbs_dev->xrcd_tree = RB_ROOT;
        mutex_init(&uverbs_dev->xrcd_tree_mutex);
        mutex_init(&uverbs_dev->lists_mutex);
        INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
        INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
        rcu_assign_pointer(uverbs_dev->ib_dev, device);
        uverbs_dev->num_comp_vectors = device->num_comp_vectors;

        devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
                               GFP_KERNEL);
        if (devnum < 0)
                goto err;
        uverbs_dev->devnum = devnum;
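        /*
         * Minors below IB_UVERBS_NUM_FIXED_MINOR live in the fixed
         * IB_UVERBS_BASE_DEV range; the rest map into the dynamically
         * allocated chrdev region.
         */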
        if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
                base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
        else
                base = IB_UVERBS_BASE_DEV + devnum;

        if (ib_uverbs_create_uapi(device, uverbs_dev))
                goto err_uapi;

        uverbs_dev->dev.devt = base;
        dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);

        cdev_init(&uverbs_dev->cdev,
                  device->mmap ? &uverbs_mmap_fops : &uverbs_fops);
        uverbs_dev->cdev.owner = THIS_MODULE;

        ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
        if (ret)
                goto err_uapi;

        ib_set_client_data(device, &uverbs_client, uverbs_dev);
        return;

err_uapi:
        ida_free(&uverbs_ida, devnum);
err:
        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
        wait_for_completion(&uverbs_dev->comp);
        put_device(&uverbs_dev->dev);
        return;
}

static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
                                        struct ib_device *ib_dev)
{
        struct ib_uverbs_file *file;
        struct ib_uverbs_async_event_file *event_file;
        struct ib_event event;

        /* Force pending and running commands to terminate */
        uverbs_disassociate_api_pre(uverbs_dev);
        event.event = IB_EVENT_DEVICE_FATAL;
        event.element.port_num = 0;
        event.device = ib_dev;

        mutex_lock(&uverbs_dev->lists_mutex);
        while (!list_empty(&uverbs_dev->uverbs_file_list)) {
                file = list_first_entry(&uverbs_dev->uverbs_file_list,
                                        struct ib_uverbs_file, list);
                list_del_init(&file->list);
                kref_get(&file->ref);

                /* We must release the mutex before going ahead and calling
                 * uverbs_destroy_ufile_hw(), as it might end up indirectly
                 * calling uverbs_close(), for example due to freeing the
                 * resources (e.g. mmput).
                 */
                mutex_unlock(&uverbs_dev->lists_mutex);

                ib_uverbs_event_handler(&file->event_handler, &event);
                uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
                kref_put(&file->ref, ib_uverbs_release_file);

                mutex_lock(&uverbs_dev->lists_mutex);
        }

        while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
                event_file = list_first_entry(&uverbs_dev->
                                              uverbs_events_file_list,
                                              struct ib_uverbs_async_event_file,
                                              list);
                spin_lock_irq(&event_file->ev_queue.lock);
                event_file->ev_queue.is_closed = 1;
                spin_unlock_irq(&event_file->ev_queue.lock);

                list_del(&event_file->list);
                ib_unregister_event_handler(
                        &event_file->uverbs_file->event_handler);
                event_file->uverbs_file->event_handler.device =
                        NULL;

                wake_up_interruptible(&event_file->ev_queue.poll_wait);
                kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
        }
        mutex_unlock(&uverbs_dev->lists_mutex);

        uverbs_disassociate_api(uverbs_dev->uapi);
}

static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
        struct ib_uverbs_device *uverbs_dev = client_data;
        int wait_clients = 1;

        if (!uverbs_dev)
                return;

        cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
        ida_free(&uverbs_ida, uverbs_dev->devnum);

        if (device->disassociate_ucontext) {
                /* We disassociate HW resources and immediately return.
                 * Userspace will see an EIO errno for all future access.
                 * Upon returning, ib_device may be freed internally and is not
                 * valid any more.
                 * uverbs_device is still available until all clients close
                 * their files, then the uverbs device ref count will be zero
                 * and its resources will be freed.
                 * Note: At this point no more files can be opened since the
                 * cdev was deleted, however active clients can still issue
                 * commands and close their open files.
                 */
                ib_uverbs_free_hw_resources(uverbs_dev, device);
                wait_clients = 0;
        }

        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
        if (wait_clients)
                wait_for_completion(&uverbs_dev->comp);

        put_device(&uverbs_dev->dev);
}

static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
        if (mode)
                *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static int __init ib_uverbs_init(void)
{
        int ret;

        ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
                                     IB_UVERBS_NUM_FIXED_MINOR,
                                     "infiniband_verbs");
        if (ret) {
                pr_err("user_verbs: couldn't register device number\n");
                goto out;
        }

        ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
                                  IB_UVERBS_NUM_DYNAMIC_MINOR,
                                  "infiniband_verbs");
        if (ret) {
                pr_err("couldn't register dynamic device number\n");
                goto out_alloc;
        }

        uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
        if (IS_ERR(uverbs_class)) {
                ret = PTR_ERR(uverbs_class);
                pr_err("user_verbs: couldn't create class infiniband_verbs\n");
                goto out_chrdev;
        }

        uverbs_class->devnode = uverbs_devnode;

        ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
        if (ret) {
                pr_err("user_verbs: couldn't create abi_version attribute\n");
                goto out_class;
        }

        ret = ib_register_client(&uverbs_client);
        if (ret) {
                pr_err("user_verbs: couldn't register client\n");
                goto out_class;
        }

        return 0;

out_class:
        class_destroy(uverbs_class);

out_chrdev:
        unregister_chrdev_region(dynamic_uverbs_dev,
                                 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
        unregister_chrdev_region(IB_UVERBS_BASE_DEV,
                                 IB_UVERBS_NUM_FIXED_MINOR);

out:
        return ret;
}

static void __exit ib_uverbs_cleanup(void)
{
        ib_unregister_client(&uverbs_client);
        class_destroy(uverbs_class);
        unregister_chrdev_region(IB_UVERBS_BASE_DEV,
                                 IB_UVERBS_NUM_FIXED_MINOR);
        unregister_chrdev_region(dynamic_uverbs_dev,
                                 IB_UVERBS_NUM_DYNAMIC_MINOR);
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);