/*
 * Copyright (c) 2017, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/rdma_user_ioctl.h>
#include <rdma/uverbs_ioctl.h>
#include "rdma_core.h"
#include "uverbs.h"
38 struct bundle_alloc_head {
39 struct bundle_alloc_head *next;
45 struct bundle_alloc_head alloc_head;
46 struct bundle_alloc_head *allocated_mem;
47 size_t internal_avail;
50 struct radix_tree_root *radix;
51 const struct uverbs_api_ioctl_method *method_elm;
52 void __rcu **radix_slots;
53 unsigned long radix_slots_len;
56 struct ib_uverbs_attr __user *user_attrs;
57 struct ib_uverbs_attr *uattrs;
59 DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
60 DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);
63 * Must be last. bundle ends in a flex array which overlaps
66 struct uverbs_attr_bundle bundle;
67 u64 internal_buffer[32];
71 * Each method has an absolute minimum amount of memory it needs to allocate,
72 * precompute that amount and determine if the onstack memory can be used or
73 * if allocation is need.
75 void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
76 unsigned int num_attrs)
78 struct bundle_priv *pbundle;
80 offsetof(struct bundle_priv, internal_buffer) +
81 sizeof(*pbundle->bundle.attrs) * method_elm->key_bitmap_len +
82 sizeof(*pbundle->uattrs) * num_attrs;
84 method_elm->use_stack = bundle_size <= sizeof(*pbundle);
85 method_elm->bundle_size =
86 ALIGN(bundle_size + 256, sizeof(*pbundle->internal_buffer));
88 /* Do not want order-2 allocations for this. */
89 WARN_ON_ONCE(method_elm->bundle_size > PAGE_SIZE);
93 * uverbs_alloc() - Quickly allocate memory for use with a bundle
95 * @size: Number of bytes to allocate
96 * @flags: Allocator flags
98 * The bundle allocator is intended for allocations that are connected with
99 * processing the system call related to the bundle. The allocated memory is
100 * always freed once the system call completes, and cannot be freed any other
103 * This tries to use a small pool of pre-allocated memory for performance.
105 __malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
108 struct bundle_priv *pbundle =
109 container_of(bundle, struct bundle_priv, bundle);
113 if (check_add_overflow(size, pbundle->internal_used, &new_used))
114 return ERR_PTR(-EOVERFLOW);
116 if (new_used > pbundle->internal_avail) {
117 struct bundle_alloc_head *buf;
119 buf = kvmalloc(struct_size(buf, data, size), flags);
121 return ERR_PTR(-ENOMEM);
122 buf->next = pbundle->allocated_mem;
123 pbundle->allocated_mem = buf;
127 res = (void *)pbundle->internal_buffer + pbundle->internal_used;
128 pbundle->internal_used =
129 ALIGN(new_used, sizeof(*pbundle->internal_buffer));
130 if (flags & __GFP_ZERO)
131 memset(res, 0, size);
134 EXPORT_SYMBOL(_uverbs_alloc);
136 static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
139 if (uattr->len > sizeof(((struct ib_uverbs_attr *)0)->data))
140 return ib_is_buffer_cleared(u64_to_user_ptr(uattr->data) + len,
143 return !memchr_inv((const void *)&uattr->data + len,
144 0, uattr->len - len);
147 static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
148 const struct uverbs_api_attr *attr_uapi,
149 struct uverbs_objs_arr_attr *attr,
150 struct ib_uverbs_attr *uattr,
153 const struct uverbs_attr_spec *spec = &attr_uapi->spec;
159 if (uattr->attr_data.reserved)
162 if (uattr->len % sizeof(u32))
165 array_len = uattr->len / sizeof(u32);
166 if (array_len < spec->u2.objs_arr.min_len ||
167 array_len > spec->u2.objs_arr.max_len)
171 uverbs_alloc(&pbundle->bundle,
172 array_size(array_len, sizeof(*attr->uobjects)));
173 if (IS_ERR(attr->uobjects))
174 return PTR_ERR(attr->uobjects);
177 * Since idr is 4B and *uobjects is >= 4B, we can use attr->uobjects
178 * to store idrs array and avoid additional memory allocation. The
179 * idrs array is offset to the end of the uobjects array so we will be
180 * able to read idr and replace with a pointer.
182 idr_vals = (u32 *)(attr->uobjects + array_len) - array_len;
184 if (uattr->len > sizeof(uattr->data)) {
185 ret = copy_from_user(idr_vals, u64_to_user_ptr(uattr->data),
190 memcpy(idr_vals, &uattr->data, uattr->len);
193 for (i = 0; i != array_len; i++) {
194 attr->uobjects[i] = uverbs_get_uobject_from_file(
195 spec->u2.objs_arr.obj_type, pbundle->bundle.ufile,
196 spec->u2.objs_arr.access, idr_vals[i]);
197 if (IS_ERR(attr->uobjects[i])) {
198 ret = PTR_ERR(attr->uobjects[i]);
204 __set_bit(attr_bkey, pbundle->spec_finalize);
208 static int uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
209 struct uverbs_objs_arr_attr *attr,
212 const struct uverbs_attr_spec *spec = &attr_uapi->spec;
217 for (i = 0; i != attr->len; i++) {
218 current_ret = uverbs_finalize_object(
219 attr->uobjects[i], spec->u2.objs_arr.access, commit);
227 static int uverbs_process_attr(struct bundle_priv *pbundle,
228 const struct uverbs_api_attr *attr_uapi,
229 struct ib_uverbs_attr *uattr, u32 attr_bkey)
231 const struct uverbs_attr_spec *spec = &attr_uapi->spec;
232 struct uverbs_attr *e = &pbundle->bundle.attrs[attr_bkey];
233 const struct uverbs_attr_spec *val_spec = spec;
234 struct uverbs_obj_attr *o_attr;
236 switch (spec->type) {
237 case UVERBS_ATTR_TYPE_ENUM_IN:
238 if (uattr->attr_data.enum_data.elem_id >= spec->u.enum_def.num_elems)
241 if (uattr->attr_data.enum_data.reserved)
244 val_spec = &spec->u2.enum_def.ids[uattr->attr_data.enum_data.elem_id];
246 /* Currently we only support PTR_IN based enums */
247 if (val_spec->type != UVERBS_ATTR_TYPE_PTR_IN)
250 e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id;
252 case UVERBS_ATTR_TYPE_PTR_IN:
253 /* Ensure that any data provided by userspace beyond the known
254 * struct is zero. Userspace that knows how to use some future
255 * longer struct will fail here if used with an old kernel and
256 * non-zero content, making ABI compat/discovery simpler.
258 if (uattr->len > val_spec->u.ptr.len &&
259 val_spec->zero_trailing &&
260 !uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len))
264 case UVERBS_ATTR_TYPE_PTR_OUT:
265 if (uattr->len < val_spec->u.ptr.min_len ||
266 (!val_spec->zero_trailing &&
267 uattr->len > val_spec->u.ptr.len))
270 if (spec->type != UVERBS_ATTR_TYPE_ENUM_IN &&
271 uattr->attr_data.reserved)
274 e->ptr_attr.uattr_idx = uattr - pbundle->uattrs;
275 e->ptr_attr.len = uattr->len;
277 if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) {
280 p = uverbs_alloc(&pbundle->bundle, uattr->len);
286 if (copy_from_user(p, u64_to_user_ptr(uattr->data),
290 e->ptr_attr.data = uattr->data;
294 case UVERBS_ATTR_TYPE_IDR:
295 case UVERBS_ATTR_TYPE_FD:
296 if (uattr->attr_data.reserved)
302 o_attr = &e->obj_attr;
303 o_attr->attr_elm = attr_uapi;
306 * The type of uattr->data is u64 for UVERBS_ATTR_TYPE_IDR and
307 * s64 for UVERBS_ATTR_TYPE_FD. We can cast the u64 to s64
308 * here without caring about truncation as we know that the
309 * IDR implementation today rejects negative IDs
311 o_attr->uobject = uverbs_get_uobject_from_file(
312 spec->u.obj.obj_type,
313 pbundle->bundle.ufile,
316 if (IS_ERR(o_attr->uobject))
317 return PTR_ERR(o_attr->uobject);
318 __set_bit(attr_bkey, pbundle->uobj_finalize);
320 if (spec->u.obj.access == UVERBS_ACCESS_NEW) {
321 unsigned int uattr_idx = uattr - pbundle->uattrs;
322 s64 id = o_attr->uobject->id;
324 /* Copy the allocated id to the user-space */
325 if (put_user(id, &pbundle->user_attrs[uattr_idx].data))
331 case UVERBS_ATTR_TYPE_IDRS_ARRAY:
332 return uverbs_process_idrs_array(pbundle, attr_uapi,
333 &e->objs_arr_attr, uattr,
343 * We search the radix tree with the method prefix and now we want to fast
344 * search the suffix bits to get a particular attribute pointer. It is not
345 * totally clear to me if this breaks the radix tree encasulation or not, but
346 * it uses the iter data to determine if the method iter points at the same
347 * chunk that will store the attribute, if so it just derefs it directly. By
348 * construction in most kernel configs the method and attrs will all fit in a
349 * single radix chunk, so in most cases this will have no search. Other cases
350 * this falls back to a full search.
352 static void __rcu **uapi_get_attr_for_method(struct bundle_priv *pbundle,
357 if (likely(attr_key < pbundle->radix_slots_len)) {
360 slot = pbundle->radix_slots + attr_key;
361 entry = rcu_dereference_raw(*slot);
362 if (likely(!radix_tree_is_internal_node(entry) && entry))
366 return radix_tree_lookup_slot(pbundle->radix,
367 pbundle->method_key | attr_key);
370 static int uverbs_set_attr(struct bundle_priv *pbundle,
371 struct ib_uverbs_attr *uattr)
373 u32 attr_key = uapi_key_attr(uattr->attr_id);
374 u32 attr_bkey = uapi_bkey_attr(attr_key);
375 const struct uverbs_api_attr *attr;
379 slot = uapi_get_attr_for_method(pbundle, attr_key);
382 * Kernel does not support the attribute but user-space says it
385 if (uattr->flags & UVERBS_ATTR_F_MANDATORY)
386 return -EPROTONOSUPPORT;
389 attr = rcu_dereference_protected(*slot, true);
391 /* Reject duplicate attributes from user-space */
392 if (test_bit(attr_bkey, pbundle->bundle.attr_present))
395 ret = uverbs_process_attr(pbundle, attr, uattr, attr_bkey);
399 __set_bit(attr_bkey, pbundle->bundle.attr_present);
404 static int ib_uverbs_run_method(struct bundle_priv *pbundle,
405 unsigned int num_attrs)
407 int (*handler)(struct ib_uverbs_file *ufile,
408 struct uverbs_attr_bundle *ctx);
409 size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs);
410 unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey;
414 /* See uverbs_disassociate_api() */
415 handler = srcu_dereference(
416 pbundle->method_elm->handler,
417 &pbundle->bundle.ufile->device->disassociate_srcu);
421 pbundle->uattrs = uverbs_alloc(&pbundle->bundle, uattrs_size);
422 if (IS_ERR(pbundle->uattrs))
423 return PTR_ERR(pbundle->uattrs);
424 if (copy_from_user(pbundle->uattrs, pbundle->user_attrs, uattrs_size))
427 for (i = 0; i != num_attrs; i++) {
428 ret = uverbs_set_attr(pbundle, &pbundle->uattrs[i]);
433 /* User space did not provide all the mandatory attributes */
434 if (unlikely(!bitmap_subset(pbundle->method_elm->attr_mandatory,
435 pbundle->bundle.attr_present,
436 pbundle->method_elm->key_bitmap_len)))
439 if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) {
440 struct uverbs_obj_attr *destroy_attr =
441 &pbundle->bundle.attrs[destroy_bkey].obj_attr;
443 ret = uobj_destroy(destroy_attr->uobject);
446 __clear_bit(destroy_bkey, pbundle->uobj_finalize);
448 ret = handler(pbundle->bundle.ufile, &pbundle->bundle);
449 uobj_put_destroy(destroy_attr->uobject);
451 ret = handler(pbundle->bundle.ufile, &pbundle->bundle);
455 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
456 * not invoke the method because the request is not supported. No
457 * other cases should return this code.
459 if (WARN_ON_ONCE(ret == -EPROTONOSUPPORT))
465 static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
467 unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len;
468 struct bundle_alloc_head *memblock;
472 /* fast path for simple uobjects */
474 while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
475 i + 1)) < key_bitmap_len) {
476 struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
479 current_ret = uverbs_finalize_object(
480 attr->obj_attr.uobject,
481 attr->obj_attr.attr_elm->spec.u.obj.access, commit);
487 while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len,
488 i + 1)) < key_bitmap_len) {
489 struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
490 const struct uverbs_api_attr *attr_uapi;
494 slot = uapi_get_attr_for_method(
496 pbundle->method_key | uapi_bkey_to_key_attr(i));
500 attr_uapi = rcu_dereference_protected(*slot, true);
502 if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
503 current_ret = uverbs_free_idrs_array(
504 attr_uapi, &attr->objs_arr_attr, commit);
510 for (memblock = pbundle->allocated_mem; memblock;) {
511 struct bundle_alloc_head *tmp = memblock;
513 memblock = memblock->next;
520 static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
521 struct ib_uverbs_ioctl_hdr *hdr,
522 struct ib_uverbs_attr __user *user_attrs)
524 const struct uverbs_api_ioctl_method *method_elm;
525 struct uverbs_api *uapi = ufile->device->uapi;
526 struct radix_tree_iter attrs_iter;
527 struct bundle_priv *pbundle;
528 struct bundle_priv onstack;
533 if (unlikely(hdr->driver_id != uapi->driver_id))
536 slot = radix_tree_iter_lookup(
537 &uapi->radix, &attrs_iter,
538 uapi_key_obj(hdr->object_id) |
539 uapi_key_ioctl_method(hdr->method_id));
541 return -EPROTONOSUPPORT;
542 method_elm = rcu_dereference_protected(*slot, true);
544 if (!method_elm->use_stack) {
545 pbundle = kmalloc(method_elm->bundle_size, GFP_KERNEL);
548 pbundle->internal_avail =
549 method_elm->bundle_size -
550 offsetof(struct bundle_priv, internal_buffer);
551 pbundle->alloc_head.next = NULL;
552 pbundle->allocated_mem = &pbundle->alloc_head;
555 pbundle->internal_avail = sizeof(pbundle->internal_buffer);
556 pbundle->allocated_mem = NULL;
559 /* Space for the pbundle->bundle.attrs flex array */
560 pbundle->method_elm = method_elm;
561 pbundle->method_key = attrs_iter.index;
562 pbundle->bundle.ufile = ufile;
563 pbundle->radix = &uapi->radix;
564 pbundle->radix_slots = slot;
565 pbundle->radix_slots_len = radix_tree_chunk_size(&attrs_iter);
566 pbundle->user_attrs = user_attrs;
568 pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len *
569 sizeof(*pbundle->bundle.attrs),
570 sizeof(*pbundle->internal_buffer));
571 memset(pbundle->bundle.attr_present, 0,
572 sizeof(pbundle->bundle.attr_present));
573 memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
574 memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));
576 ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
577 destroy_ret = bundle_destroy(pbundle, ret == 0);
578 if (unlikely(destroy_ret && !ret))
584 long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
586 struct ib_uverbs_file *file = filp->private_data;
587 struct ib_uverbs_ioctl_hdr __user *user_hdr =
588 (struct ib_uverbs_ioctl_hdr __user *)arg;
589 struct ib_uverbs_ioctl_hdr hdr;
593 if (unlikely(cmd != RDMA_VERBS_IOCTL))
596 err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
600 if (hdr.length > PAGE_SIZE ||
601 hdr.length != struct_size(&hdr, attrs, hdr.num_attrs))
604 if (hdr.reserved1 || hdr.reserved2)
605 return -EPROTONOSUPPORT;
607 srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
608 err = ib_uverbs_cmd_verbs(file, &hdr, user_hdr->attrs);
609 srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
613 int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
614 size_t idx, u64 allowed_bits)
616 const struct uverbs_attr *attr;
619 attr = uverbs_attr_get(attrs_bundle, idx);
620 /* Missing attribute means 0 flags */
627 * New userspace code should use 8 bytes to pass flags, but we
628 * transparently support old userspaces that were using 4 bytes as
631 if (attr->ptr_attr.len == 8)
632 flags = attr->ptr_attr.data;
633 else if (attr->ptr_attr.len == 4)
634 flags = *(u32 *)&attr->ptr_attr.data;
638 if (flags & ~allowed_bits)
644 EXPORT_SYMBOL(uverbs_get_flags64);
646 int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
647 size_t idx, u64 allowed_bits)
652 ret = uverbs_get_flags64(&flags, attrs_bundle, idx, allowed_bits);
662 EXPORT_SYMBOL(uverbs_get_flags32);
665 * This is for ease of conversion. The purpose is to convert all drivers to
666 * use uverbs_attr_bundle instead of ib_udata. Assume attr == 0 is input and
667 * attr == 1 is output.
669 void create_udata(struct uverbs_attr_bundle *bundle, struct ib_udata *udata)
671 struct bundle_priv *pbundle =
672 container_of(bundle, struct bundle_priv, bundle);
673 const struct uverbs_attr *uhw_in =
674 uverbs_attr_get(bundle, UVERBS_ATTR_UHW_IN);
675 const struct uverbs_attr *uhw_out =
676 uverbs_attr_get(bundle, UVERBS_ATTR_UHW_OUT);
678 if (!IS_ERR(uhw_in)) {
679 udata->inlen = uhw_in->ptr_attr.len;
680 if (uverbs_attr_ptr_is_inline(uhw_in))
682 &pbundle->user_attrs[uhw_in->ptr_attr.uattr_idx]
685 udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
691 if (!IS_ERR(uhw_out)) {
692 udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
693 udata->outlen = uhw_out->ptr_attr.len;
695 udata->outbuf = NULL;
700 int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
701 const void *from, size_t size)
703 struct bundle_priv *pbundle =
704 container_of(bundle, struct bundle_priv, bundle);
705 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
710 return PTR_ERR(attr);
712 min_size = min_t(size_t, attr->ptr_attr.len, size);
713 if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
716 flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
717 UVERBS_ATTR_F_VALID_OUTPUT;
719 &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
724 EXPORT_SYMBOL(uverbs_copy_to);
726 int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
727 size_t idx, s64 lower_bound, u64 upper_bound,
730 const struct uverbs_attr *attr;
732 attr = uverbs_attr_get(attrs_bundle, idx);
734 if ((PTR_ERR(attr) != -ENOENT) || !def_val)
735 return PTR_ERR(attr);
739 *to = attr->ptr_attr.data;
742 if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound))
747 EXPORT_SYMBOL(_uverbs_get_const);