// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/xarray.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

static void dispatch_event_fd(struct list_head *fd_list, const void *data);

enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
	DEVX_OBJ_FLAGS_DCT = 1 << 1,
	DEVX_OBJ_FLAGS_CQ = 1 << 2,
};

struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct ib_uobject *fd_uobj;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
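};

/*
 * Illustrative sketch (not part of the driver flow): because 'hdr' ends in a
 * flexible out_data[] array, it must stay the last member so the command
 * output can be allocated inline right behind the fixed part. The async-query
 * handler further below uses exactly this struct_size() pattern:
 */
static __maybe_unused struct devx_async_data *
devx_async_data_alloc_sketch(size_t cmd_out_len)
{
	struct devx_async_data *async_data;

	/* one allocation: fixed members plus cmd_out_len trailing bytes */
	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	return async_data;
}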
struct devx_async_event_data {
	struct list_head list; /* headed in ev_file->event_list */
	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
};

/* first level XA value data structure */
struct devx_event {
	struct xarray object_ids; /* second XA level, Key = object id */
	struct list_head unaffiliated_list;
};

/* second level XA value data structure */
struct devx_obj_event {
	struct rcu_head rcu;
	struct list_head obj_sub_list;
};

struct devx_event_subscription {
	struct list_head file_list; /* headed in ev_file->
				     * subscribed_events_list
				     */
	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
				   * devx_obj_event->obj_sub_list
				   */
	struct list_head obj_list; /* headed in devx_object */
	struct list_head event_list; /* headed in ev_file->event_list or in
				      * temp list via subscription
				      */

	u8 is_cleaned:1;
	u32 xa_key_level1;
	u32 xa_key_level2;
	struct rcu_head rcu;
	u64 cookie;
	struct devx_async_event_file *ev_file;
	struct file *filp; /* upon hot unplug we need direct access to the file */
	struct eventfd_ctx *eventfd;
};
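
/*
 * Illustrative sketch (assumption: keys built as in the subscribe handler
 * below): event dispatch uses a two-level XArray. Level 1 is keyed by the
 * event number, combined with the object type for per-object events
 * (key = event_num | obj_type << 16), and holds a struct devx_event; level 2
 * is keyed by the object id and holds a struct devx_obj_event:
 */
static __maybe_unused struct devx_obj_event *
devx_two_level_lookup_sketch(struct xarray *event_xa, u16 event_num,
			     u16 obj_type, u32 obj_id)
{
	struct devx_event *event;

	/* level 1: event number (tagged with the object type) */
	event = xa_load(event_xa, event_num | obj_type << 16);
	if (!event)
		return NULL;

	/* level 2: object id within that event type */
	return xa_load(&event->object_ids, obj_id);
}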
struct devx_async_event_file {
	struct ib_uobject uobj;
	/* Head of events that are subscribed to this FD */
	struct list_head subscribed_events_list;
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	struct mlx5_ib_dev *dev;
	u8 omit_data:1;
	u8 is_overflow_err:1;
	u8 is_destroyed:1;
};

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)

struct devx_obj {
	struct mlx5_ib_dev *ib_dev;
	u64 obj_id;
	u32 dinlen; /* destroy inbox length */
	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32 flags;
	union {
		struct mlx5_ib_devx_mr devx_mr;
		struct mlx5_core_dct core_dct;
		struct mlx5_core_cq core_cq;
		u32 flow_counter_bulk_size;
	};
	struct list_head event_sub; /* holds devx_event_subscription entries */
};

struct devx_umem {
	struct mlx5_core_dev *mdev;
	struct ib_umem *umem;
	u32 page_offset;
	int page_shift;
	int ncont;
	u32 dinlen;
	u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};

struct devx_umem_reg_cmd {
	void *in;
	u32 inlen;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};

static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}

int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;
	if (is_user && capable(CAP_SYS_RAWIO) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return uid;
}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;
	default:
		return false;
	}
}

bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
		if (offset && offset >= devx_obj->flow_counter_bulk_size)
			return false;

		*counter_id = MLX5_GET(dealloc_flow_counter_in,
				       devx_obj->dinbox,
				       flow_counter_id);
		*counter_id += offset;
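		return true;
	}

	return false;
}

/*
 * Illustrative sketch (not driver code): flow counters may be allocated in
 * bulk, and the id reported above is the base id of the bulk plus a caller
 * supplied offset, validated against the bulk size:
 */
static __maybe_unused bool
devx_bulk_counter_id_sketch(u32 base_id, u32 bulk_size, u32 offset,
			    u32 *counter_id)
{
	/* offset 0 is always valid; a non-zero offset must fall in the bulk */
	if (offset && offset >= bulk_size)
		return false;

	*counter_id = base_id + offset;
	return true;
}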
static bool is_legacy_unaffiliated_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return true;
	default:
		return false;
	}
}

static bool is_legacy_obj_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_COMP:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return true;
	default:
		return false;
	}
}

static u16 get_legacy_obj_type(u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_EVENT_QUEUE_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_EVENT_QUEUE_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_EVENT_QUEUE_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return 0;
	}
}

static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
{
	u16 opcode;

	opcode = (obj->obj_id >> 32) & 0xffff;

	if (is_legacy_obj_event_num(event_num))
		return get_legacy_obj_type(opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return (obj->obj_id >> 48);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_OBJ_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_OBJ_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_OBJ_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_OBJ_TYPE_DCT;
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_OBJ_TYPE_TIR;
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_OBJ_TYPE_TIS;
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_OBJ_TYPE_PSV;
	case MLX5_CMD_OP_CREATE_MKEY:
		return MLX5_OBJ_TYPE_MKEY;
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_OBJ_TYPE_RMP;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_OBJ_TYPE_XRC_SRQ;
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_OBJ_TYPE_XRQ;
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_OBJ_TYPE_RQT;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_OBJ_TYPE_FLOW_COUNTER;
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_OBJ_TYPE_CQ;
	default:
		return 0;
	}
}

static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
{
	switch (event_type) {
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return eqe->data.qp_srq.type;
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return 0;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
	}
}

static u32 get_dec_obj_id(u64 obj_id)
{
	return (obj_id & 0xffffffff);
}
/*
 * Because the obj_id assigned by firmware is not globally unique, the object
 * type must also be considered when checking for a valid object id.
 * For that, the opcode of the creating command is encoded as part of the obj_id.
 */
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
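}

/*
 * Illustrative sketch (not driver code): the composite 64-bit key keeps
 * objects of different types apart even when firmware hands out the same
 * 32-bit object number, e.g. QP number 0x10 and TIR number 0x10 map to two
 * distinct keys:
 */
static __maybe_unused void devx_obj_id_encoding_sketch(void)
{
	u64 qp_key = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, 0x10);
	u64 tir_key = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR, 0x10);

	/* same object number, different creator opcode -> different keys */
	WARN_ON(qp_key == tir_key);
	/* the low 32 bits still recover the raw object number */
	WARN_ON(get_dec_obj_id(qp_key) != 0x10);
}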
static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_type) << 16,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
			(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
			 MLX5_GET(query_packet_reformat_context_in,
				  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}

static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
				 struct ib_uobject *uobj, const void *in)
{
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
		enum ib_qp_type qp_type = qp->ibqp.qp_type;

		if (qp_type == IB_QPT_RAW_PACKET ||
		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
							 &qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp_type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;

		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}

static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}
	default:
		return;
	}
}

static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
		if (op_mod == 0)
			return true;
		return false;
	}
	case MLX5_CMD_OP_CREATE_PSV:
	{
		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);

		if (num_psv == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}

static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	/* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
	     opcode < MLX5_CMD_OP_GENERAL_END))
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_LAG:
		return true;
	default:
		return false;
	}
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}
/*
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then, upon a doorbell,
 * hardware fetches the object context for which the doorbell was rung and
 * validates that the UAR through which the doorbell was rung matches the UAR
 * ID of the object.
 * If they do not match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with a UID), we expose the user's UAR ID to
 * it, so it can embed it in these objects in the expected specification
 * format. So the only thing a user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, in which case other users
 * may ring a doorbell on its objects.
 * The consequence is that another user can schedule a QP/SQ of the buggy
 * user for execution (just insert it into the hardware schedule queue or
 * arm its CQ for event generation); no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;
	/* Only a whitelist of general HCA commands is allowed for this method. */
	if (!devx_is_general_cmd(cmd_in, dev))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}

static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen, u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	case MLX5_CMD_OP_CREATE_PSV:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_PSV);
		MLX5_SET(destroy_psv_in, din, psvn,
			 MLX5_GET(create_psv_out, out, psv0_index));
		break;
	default:
		/* The entry must match one of the commands handled in devx_is_obj_create_cmd() */
		WARN_ON(true);
		break;
	}
}
static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);

	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey,
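			       GFP_KERNEL));
}

/*
 * Illustrative sketch (not driver code): the full mkey is the 24-bit index
 * returned by firmware shifted into the upper bits (mlx5_idx_to_mkey() is
 * idx << 8) OR'ed with the 8-bit variant part the user placed in mkey_7_0:
 */
static __maybe_unused u32 devx_mkey_compose_sketch(u32 mkey_index, u8 mkey_7_0)
{
	return mlx5_idx_to_mkey(mkey_index) | mkey_7_0;
}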
static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
		      MLX5_FLD_SZ_BYTES(create_mkey_in,
					memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
	    access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}

static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
				      struct devx_event_subscription *sub)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	if (sub->is_cleaned)
		return;

	sub->is_cleaned = 1;
	list_del_rcu(&sub->xa_list);

	if (list_empty(&sub->obj_list))
		return;

	list_del_rcu(&sub->obj_list);
	/* check whether key level 1 for this obj_sub_list is empty */
	event = xa_load(&dev->devx_event_table.event_xa,
			sub->xa_key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 sub->xa_key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct mlx5_devx_event_table *devx_event_table;
	struct devx_obj *obj = uobject->object;
	struct devx_event_subscription *sub_entry, *tmp;
	struct mlx5_ib_dev *dev;
	int ret;

	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		/*
		 * pagefault_single_data_segment() issues commands against
		 * the mmkey; we must wait for those to stop before freeing
		 * the mkey, as another allocation could get the same mkey
		 * number.
		 */
		xa_erase(&obj->ib_dev->odp_mkeys,
			 mlx5_base_mkey(obj->devx_mr.mmkey.key));
		synchronize_srcu(&dev->odp_srcu);
	}

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
				    obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	devx_event_table = &dev->devx_event_table;

	mutex_lock(&devx_event_table->event_xa_lock);
	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
		devx_cleanup_subscription(dev, sub_entry);
	mutex_unlock(&devx_event_table->event_xa_lock);

	kfree(obj);
	return ret;
}

static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
	struct mlx5_devx_event_table *table;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u32 obj_id = mcq->cqn;

	table = &obj->ib_dev->devx_event_table;

	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
	if (!event)
		goto out;

	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event)
		goto out;

	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
out:
	return;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					     MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	u16 obj_type = 0;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
					   cmd_in, cmd_in_len,
					   cmd_out, cmd_out_len);
	} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
		obj->flags |= DEVX_OBJ_FLAGS_CQ;
		obj->core_cq.comp = devx_cq_comp;
		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
					  cmd_in, cmd_in_len, cmd_out,
					  cmd_out_len);
	} else {
		err = mlx5_cmd_exec(dev->mdev, cmd_in,
				    cmd_in_len,
				    cmd_out, cmd_out_len);
	}

	if (err)
		goto obj_free;

	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
		u8 bulk = MLX5_GET(alloc_flow_counter_in,
				   cmd_in,
				   flow_counter_bulk);
		obj->flow_counter_bulk_size = 128UL * bulk;
	}

	uobj->object = obj;
	INIT_LIST_HEAD(&obj->event_sub);
	obj->ib_dev = dev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto obj_destroy;

	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}
	return 0;

obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	void *cmd_out;
	int err;
	int uid;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

struct devx_async_event_queue {
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	atomic_t bytes_in_use;
	u8 is_destroyed:1;
};

struct devx_async_cmd_event_file {
	struct ib_uobject uobj;
	struct devx_async_event_queue ev_queue;
	struct mlx5_async_ctx async_ctx;
};

static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	atomic_set(&ev_queue->bytes_in_use, 0);
	ev_queue->is_destroyed = 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;

	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
	struct devx_async_event_file *ev_file;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 flags;
	int err;

	err = uverbs_get_flags32(&flags, attrs,
				 MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
				 MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
	if (err)
		return err;

	ev_file = container_of(uobj, struct devx_async_event_file,
			       uobj);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
		ev_file->omit_data = 1;
	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
	ev_file->dev = dev;
	get_device(&dev->ib_dev.dev);
	return 0;
}

static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct ib_uobject *fd_uobj = async_data->fd_uobj;
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_event_queue *ev_queue;
	unsigned long flags;

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);
	ev_queue = &ev_file->ev_queue;

	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	fput(fd_uobj->object);
}

#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
						   MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs,
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
					  MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
	    MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->fd_uobj = fd_uobj;

	get_file(fd_uobj->object);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
			       uverbs_attr_get_len(attrs,
						   MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
			       async_data->hdr.out_data,
			       async_data->cmd_out_len,
			       devx_query_callback, &async_data->cb_work);
	if (err)
		goto cb_err;

	return 0;

cb_err:
	fput(fd_uobj->object);
free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}

static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   u32 key_level2,
			   struct devx_obj *obj)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* Level 1 is valid for future use, no need to free */
	if (!obj)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1, event, GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (!is_level2)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level1 is valid for future use, no need to free */
			return -ENOMEM;

		err = xa_insert(&event->object_ids,
				key_level2, obj_event, GFP_KERNEL);
		if (err)
			return err;
		INIT_LIST_HEAD(&obj_event->obj_sub_list);
	}

	return 0;
}

static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}

#define MAX_SUPP_EVENT_NUM 255
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;

	if (MLX5_CAP_GEN(dev, event_cap)) {
		aff_events = MLX5_CAP_DEV_EVENT(dev,
						user_affiliated_events);
		unaff_events = MLX5_CAP_DEV_EVENT(dev,
						  user_unaffiliated_events);
	} else {
		return is_valid_events_legacy(num_events, event_type_num_list,
					      obj);
	}

	for (i = 0; i < num_events; i++) {
		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
			return false;

		mask_entry = event_type_num_list[i] / 64;
		mask_bit = event_type_num_list[i] % 64;

		if (obj) {
			/* CQ completion */
			if (event_type_num_list[i] == 0)
				continue;

			if (!(be64_to_cpu(aff_events[mask_entry]) &
					(1ull << mask_bit)))
				return false;

			continue;
		}

		if (!(be64_to_cpu(unaff_events[mask_entry]) &
				(1ull << mask_bit)))
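			return false;
	}

	return true;
}

/*
 * Illustrative sketch (not driver code): firmware describes the supported
 * user events as an array of big-endian 64-bit mask words, so event number N
 * is bit (N % 64) in word (N / 64):
 */
static __maybe_unused bool
devx_event_supported_sketch(const __be64 *event_mask, u16 event_num)
{
	if (event_num > MAX_SUPP_EVENT_NUM)
		return false;

	return be64_to_cpu(event_mask[event_num / 64]) &
	       (1ull << (event_num % 64));
}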
#define MAX_NUM_EVENTS 16
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
		attrs,
		MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct ib_uobject *fd_uobj;
	struct devx_obj *obj = NULL;
	struct devx_async_event_file *ev_file;
	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
	u16 *event_type_num_list;
	struct devx_event_subscription *event_sub, *tmp_sub;
	struct list_head sub_list;
	int redirect_fd;
	bool use_eventfd = false;
	int num_events;
	int num_alloc_xa_entries = 0;
	u16 obj_type = 0;
	u64 cookie = 0;
	u32 obj_id = 0;
	int err;
	int i;

	if (!c->devx_uid)
		return -EINVAL;

	if (!IS_ERR(devx_uobj)) {
		obj = (struct devx_obj *)devx_uobj->object;
		if (obj)
			obj_id = get_dec_obj_id(obj->obj_id);
	}

	fd_uobj = uverbs_attr_get_uobject(attrs,
					  MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_event_file,
			       uobj);

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
		err = uverbs_copy_from(&redirect_fd, attrs,
				       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
		if (err)
			return err;

		use_eventfd = true;
	}

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
		if (use_eventfd)
			return -EINVAL;

		err = uverbs_copy_from(&cookie, attrs,
				       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
		if (err)
			return err;
	}

	num_events = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		sizeof(u16));
	if (num_events < 0)
		return num_events;

	if (num_events > MAX_NUM_EVENTS)
		return -EINVAL;

	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
		MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);

	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
		return -EINVAL;

	INIT_LIST_HEAD(&sub_list);

	/* Protect from concurrent subscriptions to same XA entries to allow
	 * both to succeed
	 */
	mutex_lock(&devx_event_table->event_xa_lock);
	for (i = 0; i < num_events; i++) {
		u32 key_level1;

		if (obj)
			obj_type = get_dec_obj_type(obj,
						    event_type_num_list[i]);
		key_level1 = event_type_num_list[i] | obj_type << 16;

		err = subscribe_event_xa_alloc(devx_event_table,
					       key_level1,
					       obj,
					       obj_id);
		if (err)
			goto err;

		num_alloc_xa_entries++;
		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
		if (!event_sub) {
			err = -ENOMEM;
			goto err;
		}

		list_add_tail(&event_sub->event_list, &sub_list);
		if (use_eventfd) {
			event_sub->eventfd =
				eventfd_ctx_fdget(redirect_fd);

			if (IS_ERR(event_sub->eventfd)) {
				err = PTR_ERR(event_sub->eventfd);
				event_sub->eventfd = NULL;
				goto err;
			}
		}

		event_sub->cookie = cookie;
		event_sub->ev_file = ev_file;
		event_sub->filp = fd_uobj->object;
		/* May be needed upon cleanup of the devx object/subscription */
		event_sub->xa_key_level1 = key_level1;
		event_sub->xa_key_level2 = obj_id;
		INIT_LIST_HEAD(&event_sub->obj_list);
	}

	/* Once all the allocations and the XA data insertions are done, we
	 * can go ahead and add all the subscriptions to the relevant lists
	 * without fear of failure.
	 */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		struct devx_event *event;
		struct devx_obj_event *obj_event;

		list_del_init(&event_sub->event_list);

		spin_lock_irq(&ev_file->lock);
		list_add_tail_rcu(&event_sub->file_list,
				  &ev_file->subscribed_events_list);
		spin_unlock_irq(&ev_file->lock);

		event = xa_load(&devx_event_table->event_xa,
				event_sub->xa_key_level1);
		WARN_ON(!event);

		if (!obj) {
			list_add_tail_rcu(&event_sub->xa_list,
					  &event->unaffiliated_list);
			continue;
		}

		obj_event = xa_load(&event->object_ids, obj_id);
		WARN_ON(!obj_event);
		list_add_tail_rcu(&event_sub->xa_list,
				  &obj_event->obj_sub_list);
		list_add_tail_rcu(&event_sub->obj_list,
				  &obj->event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return 0;

err:
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		list_del(&event_sub->event_list);

		subscribe_event_xa_dealloc(devx_event_table,
					   event_sub->xa_key_level1,
					   obj,
					   obj_id);

		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

		kfree(event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return err;
}

static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);
	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;
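
	return 0;
}

/*
 * Illustrative sketch (not driver code): the page_offset computed above is
 * simply the start address masked to the chosen page size; e.g. with a
 * 12-bit page shift, address 0x1234 yields offset 0x234:
 */
static __maybe_unused u32
devx_umem_page_offset_sketch(unsigned long address, int page_shift)
{
	unsigned long page_mask = (1UL << page_shift) - 1;

	return address & page_mask;
}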
2156 static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
2157 struct devx_umem *obj,
2158 struct devx_umem_reg_cmd *cmd)
2160 cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
2161 (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
2162 cmd->in = uverbs_zalloc(attrs, cmd->inlen);
2163 return PTR_ERR_OR_ZERO(cmd->in);
2166 static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
2167 struct devx_umem *obj,
2168 struct devx_umem_reg_cmd *cmd)
2173 umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
2174 mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
2176 MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
2177 MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
2178 MLX5_SET(umem, umem, log_page_size, obj->page_shift -
2179 MLX5_ADAPTER_PAGE_SHIFT);
2180 MLX5_SET(umem, umem, page_offset, obj->page_offset);
2181 mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
2182 (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	devx_umem_reg_cmd_build(dev, obj, &cmd);

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
	if (err)
		goto err_umem_destroy;

	return 0;

err_umem_destroy:
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}
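/*
 * Userspace reaches this handler through the ioctl uAPI; a minimal sketch
 * assuming rdma-core's mlx5dv wrapper (illustrative, not part of this file):
 *
 *	struct mlx5dv_devx_umem *um;
 *
 *	um = mlx5dv_devx_umem_reg(ibctx, buf, len, IBV_ACCESS_LOCAL_WRITE);
 *	// on success, um->umem_id carries the object id copied out above
 */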
static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why,
			     struct uverbs_attr_bundle *attrs)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(err, why, uobject))
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}
static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
				  unsigned long event_type)
{
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;

	if (!MLX5_CAP_GEN(dev, event_cap))
		return is_legacy_unaffiliated_event_num(event_type);

	unaff_events = MLX5_CAP_DEV_EVENT(dev,
					  user_unaffiliated_events);
	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);

	mask_entry = event_type / 64;
	mask_bit = event_type % 64;

	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
		return false;

	return true;
}
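/*
 * Bitmap math example: event_type 0x13 lands in mask_entry 0, mask_bit 19,
 * i.e. bit 19 of the first __be64 word of user_unaffiliated_events.
 */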
static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
{
	struct mlx5_eqe *eqe = data;
	u32 obj_id = 0;

	switch (event_type) {
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
		break;
	default:
		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
		break;
	}

	return obj_id;
}
static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{
	struct devx_async_event_file *ev_file;
	struct devx_async_event_data *event_data;
	unsigned long flags;

	ev_file = event_sub->ev_file;

	if (ev_file->omit_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		if (!list_empty(&event_sub->event_list)) {
			spin_unlock_irqrestore(&ev_file->lock, flags);
			return 0;
		}

		list_add_tail(&event_sub->event_list, &ev_file->event_list);
		spin_unlock_irqrestore(&ev_file->lock, flags);
		wake_up_interruptible(&ev_file->poll_wait);
		return 0;
	}

	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
			     GFP_ATOMIC);
	if (!event_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		ev_file->is_overflow_err = 1;
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return -ENOMEM;
	}

	event_data->hdr.cookie = event_sub->cookie;
	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));

	spin_lock_irqsave(&ev_file->lock, flags);
	list_add_tail(&event_data->list, &ev_file->event_list);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);

	return 0;
}
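/*
 * In omit_data mode no event payload is queued; the subscription itself is
 * linked on ev_file->event_list and the reader gets back only the 64-bit
 * cookie, so a given subscription can be pending at most once.
 */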
static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{
	struct devx_event_subscription *item;

	list_for_each_entry_rcu(item, fd_list, xa_list) {
		if (!get_file_rcu(item->filp))
			continue;

		if (item->eventfd) {
			eventfd_signal(item->eventfd, 1);
			fput(item->filp);
			continue;
		}

		deliver_event(item, data);
		fput(item->filp);
	}
}
static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{
	struct mlx5_devx_event_table *table;
	struct mlx5_ib_dev *dev;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u16 obj_type = 0;
	bool is_unaffiliated;
	u32 obj_id;

	/* Explicit filtering to kernel events which may occur frequently */
	if (event_type == MLX5_EVENT_TYPE_CMD ||
	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
		return NOTIFY_OK;

	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);

	if (!is_unaffiliated)
		obj_type = get_event_obj_type(event_type, data);

	rcu_read_lock();
	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
	if (!event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	if (is_unaffiliated) {
		dispatch_event_fd(&event->unaffiliated_list, data);
		rcu_read_unlock();
		return NOTIFY_OK;
	}

	obj_id = devx_get_obj_id_from_event(event_type, data);
	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	dispatch_event_fd(&obj_event->obj_sub_list, data);

	rcu_read_unlock();
	return NOTIFY_OK;
}
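/*
 * Dispatch runs under rcu_read_lock() while subscriptions are added with
 * list_add_tail_rcu() and freed via kfree_rcu(), so this notifier never
 * has to take the subscription mutex on the event hot path.
 */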
void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;

	xa_init(&table->event_xa);
	mutex_init(&table->event_xa_lock);
	MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
	mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
}
void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	struct devx_event_subscription *sub, *tmp;
	struct devx_event *event;
	void *entry;
	unsigned long id;

	mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
	mutex_lock(&dev->devx_event_table.event_xa_lock);
	xa_for_each(&table->event_xa, id, entry) {
		event = entry;
		list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
					 xa_list)
			devx_cleanup_subscription(dev, sub);
		kfree(entry);
	}
	mutex_unlock(&dev->devx_event_table.event_xa_lock);
	xa_destroy(&table->event_xa);
}
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		if (list_empty(&ev_queue->event_list) &&
		    ev_queue->is_destroyed)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}
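/*
 * Wire format sketch for readers of this fd: each completion is a
 * struct mlx5_ib_uapi_devx_async_cmd_hdr (carrying the wr_id) immediately
 * followed by cmd_out_len bytes of command output, consumed in one read().
 */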
static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct devx_async_cmd_event_file *comp_ev_file = container_of(
		uobj, struct devx_async_cmd_event_file, uobj);
	struct devx_async_data *entry, *tmp;

	spin_lock_irq(&comp_ev_file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp,
				 &comp_ev_file->ev_queue.event_list, list)
		kvfree(entry);
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);

	uverbs_close_fd(filp);
	return 0;
}
static __poll_t devx_async_cmd_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (ev_queue->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}
static const struct file_operations devx_async_cmd_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_cmd_event_read,
	.poll    = devx_async_cmd_event_poll,
	.release = devx_async_cmd_event_close,
	.llseek	 = no_llseek,
};
static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
				     size_t count, loff_t *pos)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub;
	struct devx_async_event_data *uninitialized_var(event);
	int ret = 0;
	size_t eventsz;
	bool omit_data;
	void *event_data;

	omit_data = ev_file->omit_data;
	spin_lock_irq(&ev_file->lock);

	if (ev_file->is_overflow_err) {
		ev_file->is_overflow_err = 0;
		spin_unlock_irq(&ev_file->lock);
		return -EOVERFLOW;
	}

	if (ev_file->is_destroyed) {
		spin_unlock_irq(&ev_file->lock);
		return -EIO;
	}

	while (list_empty(&ev_file->event_list)) {
		spin_unlock_irq(&ev_file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_file->poll_wait,
			    (!list_empty(&ev_file->event_list) ||
			     ev_file->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_file->lock);
		if (ev_file->is_destroyed) {
			spin_unlock_irq(&ev_file->lock);
			return -EIO;
		}
	}

	if (omit_data) {
		event_sub = list_first_entry(&ev_file->event_list,
					     struct devx_event_subscription,
					     event_list);
		eventsz = sizeof(event_sub->cookie);
		event_data = &event_sub->cookie;
	} else {
		event = list_first_entry(&ev_file->event_list,
					 struct devx_async_event_data, list);
		eventsz = sizeof(struct mlx5_eqe) +
			sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
		event_data = &event->hdr;
	}

	if (eventsz > count) {
		spin_unlock_irq(&ev_file->lock);
		return -EINVAL;
	}

	if (omit_data)
		list_del_init(&event_sub->event_list);
	else
		list_del(&event->list);

	spin_unlock_irq(&ev_file->lock);

	if (copy_to_user(buf, event_data, eventsz))
		/* This points to an application issue, not a kernel concern */
		ret = -EFAULT;
	else
		ret = eventsz;

	if (!omit_data)
		kfree(event);
	return ret;
}
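/*
 * Read format sketch: in omit_data mode a read() returns just the u64
 * cookie; otherwise it returns struct mlx5_ib_uapi_devx_async_event_hdr
 * (the cookie) followed by the raw 64-byte mlx5_eqe.
 */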
static __poll_t devx_async_event_poll(struct file *filp,
				      struct poll_table_struct *wait)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_file->poll_wait, wait);

	spin_lock_irq(&ev_file->lock);
	if (ev_file->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_file->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_file->lock);

	return pollflags;
}
static int devx_async_event_close(struct inode *inode, struct file *filp)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub, *event_sub_tmp;
	struct devx_async_event_data *entry, *tmp;
	struct mlx5_ib_dev *dev = ev_file->dev;

	mutex_lock(&dev->devx_event_table.event_xa_lock);
	/* delete the subscriptions which are related to this FD */
	list_for_each_entry_safe(event_sub, event_sub_tmp,
				 &ev_file->subscribed_events_list, file_list) {
		devx_cleanup_subscription(dev, event_sub);
		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

		list_del_rcu(&event_sub->file_list);
		/* subscription may not be used by the read API any more */
		kfree_rcu(event_sub, rcu);
	}
	mutex_unlock(&dev->devx_event_table.event_xa_lock);

	/* free the pending events allocation */
	if (!ev_file->omit_data) {
		spin_lock_irq(&ev_file->lock);
		list_for_each_entry_safe(entry, tmp,
					 &ev_file->event_list, list)
			kfree(entry); /* read can't come any more */
		spin_unlock_irq(&ev_file->lock);
	}

	uverbs_close_fd(filp);
	put_device(&dev->ib_dev.dev);
	return 0;
}
static const struct file_operations devx_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_event_read,
	.poll    = devx_async_event_poll,
	.release = devx_async_event_close,
	.llseek	 = no_llseek,
};
static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
						enum rdma_remove_reason why)
{
	struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;

	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;
	spin_unlock_irq(&ev_queue->lock);

	if (why == RDMA_REMOVE_DRIVER_REMOVE)
		wake_up_interruptible(&ev_queue->poll_wait);

	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
	return 0;
}
static int devx_hot_unplug_async_event_file(struct ib_uobject *uobj,
					    enum rdma_remove_reason why)
{
	struct devx_async_event_file *ev_file =
		container_of(uobj, struct devx_async_event_file,
			     uobj);

	spin_lock_irq(&ev_file->lock);
	ev_file->is_destroyed = 1;
	spin_unlock_irq(&ev_file->lock);

	wake_up_interruptible(&ev_file->poll_wait);
	return 0;
}
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
			     u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ,
			UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
			   UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
			   UA_MANDATORY,
			   UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
			   UVERBS_ATTR_TYPE(u64),
			   UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
			   UVERBS_ATTR_TYPE(u32),
			   UA_OPTIONAL));
DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_hot_unplug_async_cmd_event_file,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
			     enum mlx5_ib_uapi_devx_create_event_channel_flags,
			     UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
			     devx_hot_unplug_async_event_file,
			     &devx_async_event_fops, "[devx_async_event]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
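/*
 * Channel allocation sketch from userspace, assuming rdma-core's DEVX API
 * (illustrative only, not part of this file):
 *
 *	struct mlx5dv_devx_event_channel *ch;
 *
 *	ch = mlx5dv_devx_create_event_channel(ibctx,
 *		MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
 *	// ch->fd is then polled/read via the fops declared above
 */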
static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}
const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};