/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>
#include <rdma/uverbs_std_types.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UVERBS_MAJOR       = 231,
	IB_UVERBS_BASE_MINOR  = 192,
	IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
	IB_UVERBS_NUM_FIXED_MINOR = 32,
	IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES -
				      IB_UVERBS_NUM_FIXED_MINOR,
};

#define IB_UVERBS_BASE_DEV	MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

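/*
 * Illustrative sketch (not part of the driver): with the values above, the
 * first 32 uverbs devices land in the fixed region starting at
 * MKDEV(231, 192), and any further devices take minors from the dynamically
 * allocated dynamic_uverbs_dev region, e.g.:
 *
 *	devnum 0  -> MKDEV(231, 192)		(fixed)
 *	devnum 31 -> MKDEV(231, 223)		(fixed)
 *	devnum 32 -> dynamic_uverbs_dev + 0	(dynamic)
 *
 * See ib_uverbs_add_one() for the actual mapping.
 */
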
static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;

static DEFINE_IDA(uverbs_ida);

static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);

/*
 * Must be called with the ufile->device->disassociate_srcu held, and the lock
 * must be held until use of the ucontext is finished.
 */
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
	/*
	 * We do not hold the hw_destroy_rwsem lock for this flow, instead
	 * srcu is used. It does not matter if someone races this with
	 * get_context, we get NULL or valid ucontext.
	 */
	struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

	if (!srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu))
		return ERR_PTR(-EIO);

	if (!ucontext)
		return ERR_PTR(-EINVAL);

	return ucontext;
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);

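/*
 * Caller-pattern sketch (mirrors ib_uverbs_mmap() below): the SRCU read lock
 * must stay held for as long as the returned ucontext is used.
 *
 *	srcu_key = srcu_read_lock(&ufile->device->disassociate_srcu);
 *	ucontext = ib_uverbs_get_ucontext_file(ufile);
 *	if (!IS_ERR(ucontext))
 *		... use ucontext ...
 *	srcu_read_unlock(&ufile->device->disassociate_srcu, srcu_key);
 */
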
int uverbs_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd = mw->pd;
	int ret;

	ret = mw->device->ops.dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);
	return ret;
}

static void ib_uverbs_release_dev(struct device *device)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);

	uverbs_destroy_api(dev->uapi);
	cleanup_srcu_struct(&dev->disassociate_srcu);
	kfree(dev);
}

static void ib_uverbs_release_async_event_file(struct kref *ref)
{
	struct ib_uverbs_async_event_file *file =
		container_of(ref, struct ib_uverbs_async_event_file, ref);

	kfree(file);
}

void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
			   struct ib_uverbs_completion_event_file *ev_file,
			   struct ib_ucq_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	if (ev_file) {
		spin_lock_irq(&ev_file->ev_queue.lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->ev_queue.lock);

		uverbs_uobject_put(&ev_file->uobj);
	}

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}

void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
			      struct ib_uevent_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}

void ib_uverbs_detach_umcast(struct ib_qp *qp,
			     struct ib_uqp_object *uobj)
{
	struct ib_uverbs_mcast_entry *mcast, *tmp;

	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
		list_del(&mcast->list);
		kfree(mcast);
	}
}

static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
	complete(&dev->comp);
}

void ib_uverbs_release_file(struct kref *ref)
{
	struct ib_uverbs_file *file =
		container_of(ref, struct ib_uverbs_file, ref);
	struct ib_device *ib_dev;
	int srcu_key;

	release_ufile_idr_uobject(file);

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (ib_dev && !ib_dev->ops.disassociate_ucontext)
		module_put(ib_dev->owner);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	if (atomic_dec_and_test(&file->device->refcount))
		ib_uverbs_comp_dev(file->device);

	put_device(&file->device->dev);
	kfree(file);
}

static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
				    struct ib_uverbs_file *uverbs_file,
				    struct file *filp, char __user *buf,
				    size_t count, loff_t *pos,
				    size_t eventsz)
{
	struct ib_uverbs_event *event;
	int ret = 0;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_queue->poll_wait,
					     (!list_empty(&ev_queue->event_list) ||
			/* The barriers built into wait_event_interruptible()
			 * and wake_up() guarantee this will see the null set
			 * without using RCU
			 */
					      !uverbs_file->device->ib_dev)))
			return -ERESTARTSYS;

		/* If the device was disassociated and no event exists, set an error */
		if (list_empty(&ev_queue->event_list) &&
		    !uverbs_file->device->ib_dev)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

	if (eventsz > count) {
		ret   = -EINVAL;
		event = NULL;
	} else {
		list_del(ev_queue->event_list.next);
		if (event->counter) {
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&ev_queue->lock);

	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	kfree(event);

	return ret;
}

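/*
 * Userspace-side sketch (an illustration, not kernel code): a completion
 * channel fd is consumed in units of struct ib_uverbs_comp_event_desc from
 * <rdma/ib_user_verbs.h>; per the logic above, a short buffer yields -EINVAL
 * and an empty queue on an O_NONBLOCK fd yields -EAGAIN.
 *
 *	struct ib_uverbs_comp_event_desc desc;
 *	ssize_t n = read(comp_channel_fd, &desc, sizeof(desc));
 *	if (n == sizeof(desc))
 *		... desc.cq_handle identifies the signalled CQ ...
 */
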
static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
					  size_t count, loff_t *pos)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;

	return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_async_event_desc));
}

static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_read(&comp_ev_file->ev_queue,
				    comp_ev_file->uobj.ufile, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_comp_event_desc));
}

static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
				     struct file *filp,
				     struct poll_table_struct *wait)
{
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static __poll_t ib_uverbs_async_event_poll(struct file *filp,
					   struct poll_table_struct *wait)
{
	return ib_uverbs_event_poll(filp->private_data, filp, wait);
}

static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}

static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_event_queue *ev_queue = filp->private_data;

	return fasync_helper(fd, filp, on, &ev_queue->async_queue);
}

static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}

static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;
	struct ib_uverbs_file *uverbs_file = file->uverbs_file;
	struct ib_uverbs_event *entry, *tmp;
	int closed_already = 0;

	mutex_lock(&uverbs_file->device->lists_mutex);
	spin_lock_irq(&file->ev_queue.lock);
	closed_already = file->ev_queue.is_closed;
	file->ev_queue.is_closed = 1;
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->ev_queue.lock);
	if (!closed_already) {
		list_del(&file->list);
		ib_unregister_event_handler(&uverbs_file->event_handler);
	}
	mutex_unlock(&uverbs_file->device->lists_mutex);

	kref_put(&uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&file->ref, ib_uverbs_release_async_event_file);

	return 0;
}

static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct ib_uverbs_completion_event_file *file = container_of(
		uobj, struct ib_uverbs_completion_event_file, uobj);
	struct ib_uverbs_event *entry, *tmp;

	spin_lock_irq(&file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	file->ev_queue.is_closed = 1;
	spin_unlock_irq(&file->ev_queue.lock);

	uverbs_close_fd(filp);

	return 0;
}

const struct file_operations uverbs_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_comp_event_read,
	.poll    = ib_uverbs_comp_event_poll,
	.release = ib_uverbs_comp_event_close,
	.fasync  = ib_uverbs_comp_event_fasync,
	.llseek	 = no_llseek,
};

static const struct file_operations uverbs_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_async_event_read,
	.poll    = ib_uverbs_async_event_poll,
	.release = ib_uverbs_async_event_close,
	.fasync  = ib_uverbs_async_event_fasync,
	.llseek	 = no_llseek,
};

void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_uverbs_event_queue *ev_queue = cq_context;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event *entry;
	unsigned long flags;

	if (!ev_queue)
		return;

	spin_lock_irqsave(&ev_queue->lock, flags);
	if (ev_queue->is_closed) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	entry->desc.comp.cq_handle = cq->uobject->user_handle;
	entry->counter		   = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &ev_queue->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}

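/*
 * Flow note (a sketch of the wiring, which lives in the CQ-creation path):
 * the driver invokes cq->comp_handler(cq, cq->cq_context); for uverbs CQs
 * comp_handler points here and cq_context at the completion channel's event
 * queue (or NULL when no channel was given, hence the !ev_queue check above).
 * The entry queued here is what ib_uverbs_comp_event_read() later copies out.
 */
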
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
				    __u64 element, __u64 event,
				    struct list_head *obj_list,
				    u32 *counter)
{
	struct ib_uverbs_event *entry;
	unsigned long flags;

	spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
	if (file->async_file->ev_queue.is_closed) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry->desc.async.element    = element;
	entry->desc.async.event_type = event;
	entry->desc.async.reserved   = 0;
	entry->counter               = counter;

	list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);

	wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
	kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}

void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
						  struct ib_ucq_object, uobject);

	ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
				event->event, &uobj->async_list,
				&uobj->async_events_reported);
}

void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	/* for XRC target qp's, check that qp is live */
	if (!event->element.qp->uobject)
		return;

	uobj = container_of(event->element.qp->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}

void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
						     struct ib_uevent_object,
						     uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}

void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	uobj = container_of(event->element.srq->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}

void ib_uverbs_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	struct ib_uverbs_file *file =
		container_of(handler, struct ib_uverbs_file, event_handler);

	ib_uverbs_async_handler(file, event->element.port_num, event->event,
				NULL, NULL);
}

void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
	kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file);
	file->async_file = NULL;
}

void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	ev_queue->is_closed = 0;
	ev_queue->async_queue = NULL;
}

struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
					      struct ib_device *ib_dev)
{
	struct ib_uverbs_async_event_file *ev_file;
	struct file *filp;

	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
	if (!ev_file)
		return ERR_PTR(-ENOMEM);

	ib_uverbs_init_event_queue(&ev_file->ev_queue);
	ev_file->uverbs_file = uverbs_file;
	kref_get(&ev_file->uverbs_file->ref);
	kref_init(&ev_file->ref);
	filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
				  ev_file, O_RDONLY);
	if (IS_ERR(filp))
		goto err_put_refs;

	mutex_lock(&uverbs_file->device->lists_mutex);
	list_add_tail(&ev_file->list,
		      &uverbs_file->device->uverbs_events_file_list);
	mutex_unlock(&uverbs_file->device->lists_mutex);

	WARN_ON(uverbs_file->async_file);
	uverbs_file->async_file = ev_file;
	kref_get(&uverbs_file->async_file->ref);
	INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
			      ib_dev,
			      ib_uverbs_event_handler);
	ib_register_event_handler(&uverbs_file->event_handler);
	/* At this point the async file is fully set up */

	return filp;

err_put_refs:
	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
	return filp;
}

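/*
 * Context sketch (assumption about the caller, which lives in the
 * GET_CONTEXT command handler): the file returned here is installed in an
 * fd that userspace receives as the async_fd of its context, from which it
 * read()s struct ib_uverbs_async_event_desc records.
 */
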
static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
			  struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
			  const struct uverbs_api_write_method *method_elm)
{
	if (method_elm->is_ex) {
		count -= sizeof(*hdr) + sizeof(*ex_hdr);

		if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
			return -EINVAL;

		if (hdr->in_words * 8 < method_elm->req_size)
			return -ENOSPC;

		if (ex_hdr->cmd_hdr_reserved)
			return -EINVAL;

		if (ex_hdr->response) {
			if (!hdr->out_words && !ex_hdr->provider_out_words)
				return -EINVAL;

			if (hdr->out_words * 8 < method_elm->resp_size)
				return -ENOSPC;

			if (!access_ok(u64_to_user_ptr(ex_hdr->response),
				       (hdr->out_words + ex_hdr->provider_out_words) * 8))
				return -EFAULT;
		} else {
			if (hdr->out_words || ex_hdr->provider_out_words)
				return -EINVAL;
		}

		return 0;
	}

	/* not extended command */
	if (hdr->in_words * 4 != count)
		return -EINVAL;

	if (count < method_elm->req_size + sizeof(hdr)) {
		/*
		 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
		 * with a 16 byte write instead of 24. Old kernels didn't
		 * check the size so they allowed this. Now that the size is
		 * checked provide a compatibility work around to not break
		 * those userspaces.
		 */
		if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
		    count == 16) {
			hdr->in_words = 6;
			return 0;
		}
		return -ENOSPC;
	}
	if (hdr->out_words * 4 < method_elm->resp_size)
		return -ENOSPC;

	return 0;
}

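/*
 * Worked example of the size math above (illustrative only): for the legacy
 * layout, hdr->in_words counts 4-byte words and *includes* the 8-byte header,
 * so a command with a 24-byte payload sends in_words = (8 + 24) / 4 = 8.
 * Extended commands instead count 8-byte words and *exclude* both headers,
 * which is why the is_ex branch first subtracts both header sizes from count.
 */
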
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct ib_uverbs_file *file = filp->private_data;
	const struct uverbs_api_write_method *method_elm;
	struct uverbs_api *uapi = file->device->uapi;
	struct ib_uverbs_ex_cmd_hdr ex_hdr;
	struct ib_uverbs_cmd_hdr hdr;
	struct uverbs_attr_bundle bundle;
	int srcu_key;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	method_elm = uapi_get_method(uapi, hdr.command);
	if (IS_ERR(method_elm))
		return PTR_ERR(method_elm);

	if (method_elm->is_ex) {
		if (count < (sizeof(hdr) + sizeof(ex_hdr)))
			return -EINVAL;
		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
			return -EFAULT;
	}

	ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
	if (ret)
		return ret;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);

	buf += sizeof(hdr);

	bundle.ufile = file;
	if (!method_elm->is_ex) {
		size_t in_len = hdr.in_words * 4 - sizeof(hdr);
		size_t out_len = hdr.out_words * 4;
		u64 response = 0;

		if (method_elm->has_udata) {
			bundle.driver_udata.inlen =
				in_len - method_elm->req_size;
			in_len = method_elm->req_size;
			if (bundle.driver_udata.inlen)
				bundle.driver_udata.inbuf = buf + in_len;
			else
				bundle.driver_udata.inbuf = NULL;
		} else {
			memset(&bundle.driver_udata, 0,
			       sizeof(bundle.driver_udata));
		}

		if (method_elm->has_resp) {
			/*
			 * The macros check that if has_resp is set
			 * then the command request structure starts
			 * with a '__aligned u64 response' member.
			 */
			ret = get_user(response, (const u64 __user *)buf);
			if (ret)
				goto out_unlock;

			if (method_elm->has_udata) {
				bundle.driver_udata.outlen =
					out_len - method_elm->resp_size;
				out_len = method_elm->resp_size;
				if (bundle.driver_udata.outlen)
					bundle.driver_udata.outbuf =
						u64_to_user_ptr(response +
								out_len);
				else
					bundle.driver_udata.outbuf = NULL;
			}
		} else {
			bundle.driver_udata.outlen = 0;
			bundle.driver_udata.outbuf = NULL;
		}

		ib_uverbs_init_udata_buf_or_null(
			&bundle.ucore, buf, u64_to_user_ptr(response),
			in_len, out_len);
	} else {
		buf += sizeof(ex_hdr);

		ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
					u64_to_user_ptr(ex_hdr.response),
					hdr.in_words * 8, hdr.out_words * 8);

		ib_uverbs_init_udata_buf_or_null(
			&bundle.driver_udata, buf + bundle.ucore.inlen,
			u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
			ex_hdr.provider_in_words * 8,
			ex_hdr.provider_out_words * 8);
	}

	ret = method_elm->handler(&bundle);
out_unlock:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return (ret) ? : count;
}

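/*
 * Userspace view, as a hedged sketch: each legacy command is one write()
 * whose first bytes are struct ib_uverbs_cmd_hdr followed by the command
 * struct, e.g. (error handling omitted):
 *
 *	struct {
 *		struct ib_uverbs_cmd_hdr hdr;
 *		struct ib_uverbs_get_context cmd;
 *	} req = {
 *		.hdr.command   = IB_USER_VERBS_CMD_GET_CONTEXT,
 *		.hdr.in_words  = sizeof(req) / 4,
 *		.hdr.out_words = sizeof(struct ib_uverbs_get_context_resp) / 4,
 *	};
 *	write(uverbs_fd, &req, sizeof(req));
 */
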
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_ucontext *ucontext;
	int ret = 0;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ucontext = ib_uverbs_get_ucontext_file(file);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto out;
	}

	ret = ucontext->device->ops.mmap(ucontext, vma);
out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}

/*
 * Each time we map IO memory into user space this keeps track of the
 * mapping. When the device is hot-unplugged we 'zap' the mmaps in user space
 * to point to the zero page and allow the hot unplug to proceed.
 *
 * This is necessary for cases like PCI physical hot unplug as the actual BAR
 * memory may vanish after this and access to it from userspace could MCE.
 *
 * RDMA drivers supporting disassociation must have their user space designed
 * to cope in some way with their IO pages going to the zero page.
 */
struct rdma_umap_priv {
	struct vm_area_struct *vma;
	struct list_head list;
};

static const struct vm_operations_struct rdma_umap_ops;

static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
				struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;

	priv->vma = vma;
	vma->vm_private_data = priv;
	vma->vm_ops = &rdma_umap_ops;

	mutex_lock(&ufile->umap_lock);
	list_add(&priv->list, &ufile->umaps);
	mutex_unlock(&ufile->umap_lock);
}

/*
 * The VMA has been dup'd, initialize the vm_private_data with a new tracking
 * struct
 */
static void rdma_umap_open(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *opriv = vma->vm_private_data;
	struct rdma_umap_priv *priv;

	if (!opriv)
		return;

	/* We are racing with disassociation */
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
		goto out_zap;
	/*
	 * Disassociation already completed, the VMA should already be zapped.
	 */
	if (!ufile->ucontext)
		goto out_unlock;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto out_unlock;
	rdma_umap_priv_init(priv, vma);

	up_read(&ufile->hw_destroy_rwsem);
	return;

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
out_zap:
	/*
	 * We can't allow the VMA to be created with the actual IO pages, that
	 * would break our API contract, and it can't be stopped at this
	 * point, so zap it.
	 */
	vma->vm_private_data = NULL;
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}

static void rdma_umap_close(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *priv = vma->vm_private_data;

	if (!priv)
		return;

	/*
	 * The vma holds a reference on the struct file that created it, which
	 * in turn means that the ib_uverbs_file is guaranteed to exist at
	 * this point.
	 */
	mutex_lock(&ufile->umap_lock);
	list_del(&priv->list);
	mutex_unlock(&ufile->umap_lock);

	kfree(priv);
}

static const struct vm_operations_struct rdma_umap_ops = {
	.open = rdma_umap_open,
	.close = rdma_umap_close,
};

static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
						 struct vm_area_struct *vma,
						 unsigned long size)
{
	struct ib_uverbs_file *ufile = ucontext->ufile;
	struct rdma_umap_priv *priv;

	if (vma->vm_end - vma->vm_start != size)
		return ERR_PTR(-EINVAL);

	/* Driver is using this wrong, must be called by ib_uverbs_mmap */
	if (WARN_ON(!vma->vm_file ||
		    vma->vm_file->private_data != ufile))
		return ERR_PTR(-EINVAL);
	lockdep_assert_held(&ufile->device->disassociate_srcu);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	return priv;
}

/*
 * Map IO memory into a process. This is to be called by drivers as part of
 * their mmap() functions if they wish to send something like PCI-E BAR
 * memory to userspace.
 */
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot)
{
	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	vma->vm_page_prot = prot;
	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_io);

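/*
 * Hedged driver-side sketch: a driver's .mmap handler exposing a page of
 * device registers would typically end with something like
 *
 *	return rdma_user_mmap_io(ucontext, vma, pfn, PAGE_SIZE,
 *				 pgprot_noncached(vma->vm_page_prot));
 *
 * where pfn addresses the BAR page; the pgprot choice (non-cached,
 * write-combining, ...) is the driver's decision.
 */
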
/*
 * The page case is here for a slightly different reason, the driver expects
 * to be able to free the page it is sharing to user space when it destroys
 * its ucontext, which means we need to zap the user space references.
 *
 * We could handle this differently by providing an API to allocate a shared
 * page and then only freeing the shared page when the last ufile is
 * destroyed.
 */
int rdma_user_mmap_page(struct ib_ucontext *ucontext,
			struct vm_area_struct *vma, struct page *page,
			unsigned long size)
{
	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
			    vma->vm_page_prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_page);

void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
	struct rdma_umap_priv *priv, *next_priv;

	lockdep_assert_held(&ufile->hw_destroy_rwsem);

	while (1) {
		struct mm_struct *mm = NULL;

		/* Get an arbitrary mm pointer that hasn't been cleaned yet */
		mutex_lock(&ufile->umap_lock);
		if (!list_empty(&ufile->umaps)) {
			mm = list_first_entry(&ufile->umaps,
					      struct rdma_umap_priv, list)
					     ->vma->vm_mm;
			mmget(mm);
		}
		mutex_unlock(&ufile->umap_lock);
		if (!mm)
			return;

		/*
		 * The umap_lock is nested under mmap_sem since it is used
		 * within the vma_ops callbacks, so we have to clean the list
		 * one mm at a time to get the lock ordering right. Typically
		 * there will only be one mm, so no big deal.
		 */
		down_write(&mm->mmap_sem);
		mutex_lock(&ufile->umap_lock);
		list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
					  list) {
			struct vm_area_struct *vma = priv->vma;

			if (vma->vm_mm != mm)
				continue;
			list_del_init(&priv->list);

			zap_vma_ptes(vma, vma->vm_start,
				     vma->vm_end - vma->vm_start);
			vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
		}
		mutex_unlock(&ufile->umap_lock);
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
}

/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately fail with -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_device *dev;
	struct ib_uverbs_file *file;
	struct ib_device *ib_dev;
	int ret;
	int module_dependent;
	int srcu_key;

	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
	if (!atomic_inc_not_zero(&dev->refcount))
		return -ENXIO;

	get_device(&dev->dev);
	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	mutex_lock(&dev->lists_mutex);
	ib_dev = srcu_dereference(dev->ib_dev,
				  &dev->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	/* In case the IB device supports disassociating the ucontext, there
	 * is no hard dependency between the uverbs device and its low-level
	 * device.
	 */
	module_dependent = !(ib_dev->ops.disassociate_ucontext);

	if (module_dependent) {
		if (!try_module_get(ib_dev->owner)) {
			ret = -ENODEV;
			goto err;
		}
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		if (module_dependent)
			goto err_module;

		goto err;
	}

	file->device	 = dev;
	kref_init(&file->ref);
	mutex_init(&file->ucontext_lock);

	spin_lock_init(&file->uobjects_lock);
	INIT_LIST_HEAD(&file->uobjects);
	init_rwsem(&file->hw_destroy_rwsem);
	mutex_init(&file->umap_lock);
	INIT_LIST_HEAD(&file->umaps);

	filp->private_data = file;
	list_add_tail(&file->list, &dev->uverbs_file_list);
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	setup_ufile_idr_uobject(file);

	return nonseekable_open(inode, filp);

err_module:
	module_put(ib_dev->owner);

err:
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	if (atomic_dec_and_test(&dev->refcount))
		ib_uverbs_comp_dev(dev);

	put_device(&dev->dev);
	return ret;
}

static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *file = filp->private_data;

	uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

	mutex_lock(&file->device->lists_mutex);
	list_del_init(&file->list);
	mutex_unlock(&file->device->lists_mutex);

	if (file->async_file)
		kref_put(&file->async_file->ref,
			 ib_uverbs_release_async_event_file);

	kref_put(&file->ref, ib_uverbs_release_file);

	return 0;
}

static const struct file_operations uverbs_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = ib_uverbs_ioctl,
};

static const struct file_operations uverbs_mmap_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.mmap    = ib_uverbs_mmap,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = ib_uverbs_ioctl,
};

static struct ib_client uverbs_client = {
	.name   = "uverbs",
	.add    = ib_uverbs_add_one,
	.remove = ib_uverbs_remove_one
};

static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(ibdev);

static ssize_t abi_version_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(abi_version);

static struct attribute *ib_dev_attrs[] = {
	&dev_attr_abi_version.attr,
	&dev_attr_ibdev.attr,
	NULL,
};

static const struct attribute_group dev_attr_group = {
	.attrs = ib_dev_attrs,
};

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_VERBS_ABI_VERSION));

static int ib_uverbs_create_uapi(struct ib_device *device,
				 struct ib_uverbs_device *uverbs_dev)
{
	struct uverbs_api *uapi;

	uapi = uverbs_alloc_api(device);
	if (IS_ERR(uapi))
		return PTR_ERR(uapi);

	uverbs_dev->uapi = uapi;
	return 0;
}

static void ib_uverbs_add_one(struct ib_device *device)
{
	int devnum;
	dev_t base;
	struct ib_uverbs_device *uverbs_dev;
	int ret;

	if (!device->ops.alloc_ucontext)
		return;

	uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
	if (!uverbs_dev)
		return;

	ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
	if (ret) {
		kfree(uverbs_dev);
		return;
	}

	device_initialize(&uverbs_dev->dev);
	uverbs_dev->dev.class = uverbs_class;
	uverbs_dev->dev.parent = device->dev.parent;
	uverbs_dev->dev.release = ib_uverbs_release_dev;
	uverbs_dev->groups[0] = &dev_attr_group;
	uverbs_dev->dev.groups = uverbs_dev->groups;
	atomic_set(&uverbs_dev->refcount, 1);
	init_completion(&uverbs_dev->comp);
	uverbs_dev->xrcd_tree = RB_ROOT;
	mutex_init(&uverbs_dev->xrcd_tree_mutex);
	mutex_init(&uverbs_dev->lists_mutex);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
	rcu_assign_pointer(uverbs_dev->ib_dev, device);
	uverbs_dev->num_comp_vectors = device->num_comp_vectors;

	devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
			       GFP_KERNEL);
	if (devnum < 0)
		goto err;
	uverbs_dev->devnum = devnum;
	if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
		base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
	else
		base = IB_UVERBS_BASE_DEV + devnum;

	if (ib_uverbs_create_uapi(device, uverbs_dev))
		goto err_uapi;

	uverbs_dev->dev.devt = base;
	dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);

	cdev_init(&uverbs_dev->cdev,
		  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
	uverbs_dev->cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
	if (ret)
		goto err_uapi;

	ib_set_client_data(device, &uverbs_client, uverbs_dev);
	return;

err_uapi:
	ida_free(&uverbs_ida, devnum);
err:
	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	wait_for_completion(&uverbs_dev->comp);
	put_device(&uverbs_dev->dev);
	return;
}

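/*
 * Net effect, for illustration: the first device registered this way shows
 * up as char device "uverbs0" and, via uverbs_devnode() below, as
 * /dev/infiniband/uverbs0.
 */
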
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
					struct ib_device *ib_dev)
{
	struct ib_uverbs_file *file;
	struct ib_uverbs_async_event_file *event_file;
	struct ib_event event;

	/* Pending running commands to terminate */
	uverbs_disassociate_api_pre(uverbs_dev);
	event.event = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;
	event.device = ib_dev;

	mutex_lock(&uverbs_dev->lists_mutex);
	while (!list_empty(&uverbs_dev->uverbs_file_list)) {
		file = list_first_entry(&uverbs_dev->uverbs_file_list,
					struct ib_uverbs_file, list);
		list_del_init(&file->list);
		kref_get(&file->ref);

		/* We must release the mutex before going ahead and calling
		 * uverbs_cleanup_ufile, as it might end up indirectly calling
		 * uverbs_close, for example due to freeing the resources (e.g.
		 * mmput).
		 */
		mutex_unlock(&uverbs_dev->lists_mutex);

		ib_uverbs_event_handler(&file->event_handler, &event);
		uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
		kref_put(&file->ref, ib_uverbs_release_file);

		mutex_lock(&uverbs_dev->lists_mutex);
	}

	while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
		event_file = list_first_entry(&uverbs_dev->
					      uverbs_events_file_list,
					      struct ib_uverbs_async_event_file,
					      list);
		spin_lock_irq(&event_file->ev_queue.lock);
		event_file->ev_queue.is_closed = 1;
		spin_unlock_irq(&event_file->ev_queue.lock);

		list_del(&event_file->list);
		ib_unregister_event_handler(
			&event_file->uverbs_file->event_handler);
		event_file->uverbs_file->event_handler.device =
			NULL;

		wake_up_interruptible(&event_file->ev_queue.poll_wait);
		kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
	}
	mutex_unlock(&uverbs_dev->lists_mutex);

	uverbs_disassociate_api(uverbs_dev->uapi);
}

static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_uverbs_device *uverbs_dev = client_data;
	int wait_clients = 1;

	if (!uverbs_dev)
		return;

	cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
	ida_free(&uverbs_ida, uverbs_dev->devnum);

	if (device->ops.disassociate_ucontext) {
		/* We disassociate HW resources and immediately return.
		 * Userspace will see an EIO errno for all future access.
		 * Upon returning, ib_device may be freed internally and is not
		 * valid any more.
		 * uverbs_device is still available until all clients close
		 * their files, then the uverbs device ref count will be zero
		 * and its resources will be freed.
		 * Note: At this point no more files can be opened since the
		 * cdev was deleted, however active clients can still issue
		 * commands and close their open files.
		 */
		ib_uverbs_free_hw_resources(uverbs_dev, device);
		wait_clients = 0;
	}

	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	if (wait_clients)
		wait_for_completion(&uverbs_dev->comp);

	put_device(&uverbs_dev->dev);
}

static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static int __init ib_uverbs_init(void)
{
	int ret;

	ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
				     IB_UVERBS_NUM_FIXED_MINOR,
				     "infiniband_verbs");
	if (ret) {
		pr_err("user_verbs: couldn't register device number\n");
		goto out;
	}

	ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
				  IB_UVERBS_NUM_DYNAMIC_MINOR,
				  "infiniband_verbs");
	if (ret) {
		pr_err("couldn't register dynamic device number\n");
		goto out_alloc;
	}

	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
	if (IS_ERR(uverbs_class)) {
		ret = PTR_ERR(uverbs_class);
		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
		goto out_chrdev;
	}

	uverbs_class->devnode = uverbs_devnode;

	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("user_verbs: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&uverbs_client);
	if (ret) {
		pr_err("user_verbs: couldn't register client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(uverbs_class);

out_chrdev:
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);

out:
	return ret;
}

static void __exit ib_uverbs_cleanup(void)
{
	ib_unregister_client(&uverbs_client);
	class_destroy(uverbs_class);
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);