/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

/*
 * The mlx4 driver uses the top byte to distinguish which virtual function
 * generated the MAD, so we must avoid using it.
 */
#define AGENT_ID_LIMIT		(1 << 24)
static DEFINE_IDR(ib_mad_clients);
static struct list_head ib_mad_port_list;

static DEFINE_SPINLOCK(ib_mad_port_list_lock);
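/*
 * Agent IDs are allocated cyclically from ib_mad_clients below
 * AGENT_ID_LIMIT; the allocated ID becomes the agent's hi_tid, which
 * find_mad_agent() uses to route a response MAD back to its owning
 * agent by looking up the high 32 bits of the transaction ID.
 */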
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is being held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method_in_use(&method, mad_reg_req))
				return 1;
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
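/*
 * A MAD is treated as a response when the RESP bit is set in the
 * method, when the method is TrapRepress, or, for the BM class, when
 * the response bit is set in attr_mod.
 */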
/*
 * ib_register_mad_agent - Register to send/receive MADs.
 * Context: Process context.
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	u8 mgmt_class, vclass;

	if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
	    (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
		return ERR_PTR(-EPROTONOSUPPORT);

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
		dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_dbg_ratelimited(&device->dev,
				    "%s: invalid RMPP Version %u\n",
				    __func__, rmpp_version);

	/* Validate MAD registration request if supplied */
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: invalid Class Version %u\n",
					    mad_reg_req->mgmt_class_version);
			dev_dbg_ratelimited(&device->dev,
					    "%s: no recv_handler\n", __func__);
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid Mgmt Class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_dbg_ratelimited(&device->dev,
					    "%s: Invalid Mgmt Class 0\n",
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: No OUI specified for class 0x%x\n",
					mad_reg_req->mgmt_class);
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: RMPP version for non-RMPP class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
			     IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
			     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid SM QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
			if ((mad_reg_req->mgmt_class ==
			     IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
			     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid GS QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
	/* No registration request supplied */
		if (registration_flags & IB_MAD_USER_RMPP)

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
		dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
		ret = ERR_PTR(-ENODEV);

	/* Verify the QP requested is supported. For example, Ethernet
	 * devices will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
		ret = ERR_PTR(-EPROTONOSUPPORT);

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);

		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
			ret = ERR_PTR(-ENOMEM);

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);

	idr_preload(GFP_KERNEL);
	idr_lock(&ib_mad_clients);
	ret2 = idr_alloc_cyclic(&ib_mad_clients, mad_agent_priv, 0,
				AGENT_ID_LIMIT, GFP_ATOMIC);
	idr_unlock(&ib_mad_clients);

	mad_agent_priv->agent.hi_tid = ret2;

	/*
	 * Make sure the MAD registration (if supplied) does not overlap
	 * with any existing ones.
	 */
	spin_lock_irq(&port_priv->reg_lock);
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
				method = class->method_table[mgmt_class];
					if (method_in_use(&method,
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
					if (is_vendor_method_in_use(
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
	spin_unlock_irq(&port_priv->reg_lock);

	return &mad_agent_priv->agent;

	spin_unlock_irq(&port_priv->reg_lock);
	idr_lock(&ib_mad_clients);
	idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
	idr_unlock(&ib_mad_clients);
	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
	kfree(mad_agent_priv);
}
EXPORT_SYMBOL(ib_register_mad_agent);
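/*
 * Typical usage (illustrative sketch only; the request fields, handlers,
 * and context shown are hypothetical and not taken from this file):
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class = IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version = 2,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */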
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])

	if (i == qp_info->snoop_table_size) {
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
		if (!new_snoop_table) {

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   enum ib_qp_type qp_type,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
	qpn = get_spl_qp_index(qp_type);
		ret = ERR_PTR(-EINVAL);
	port_priv = ib_get_mad_port(device, port_num);
		ret = ERR_PTR(-ENODEV);

	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);

	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);

	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
	kfree(mad_snoop_priv);
}
EXPORT_SYMBOL(ib_register_mad_snoop);
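/*
 * Snoop agents do not own a TID range (their hi_tid stays zero, which is
 * how ib_unregister_mad_agent() tells them apart); they only observe
 * traffic via snoop_send() and snoop_recv() below.
 */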
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;

	/* Note that we could still be handling received MADs */
	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irq(&port_priv->reg_lock);
	remove_mad_reg_req(mad_agent_priv);
	spin_unlock_irq(&port_priv->reg_lock);
	idr_lock(&ib_mad_clients);
	idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
	idr_unlock(&ib_mad_clients);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);

	kfree(mad_agent_priv->reg_req);
	kfree_rcu(mad_agent_priv, rcu);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services.
 * Context: Process context.
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
		unregister_mad_snoop(mad_snoop_priv);
	}
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
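/*
 * Unregistration blocks: deref_mad_agent()/deref_snoop_agent() drop the
 * caller's reference, and the unregister paths above wait on the agent's
 * completion until every outstanding send and receive reference has been
 * released before the agent is freed.
 */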
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;

	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
{
	struct ib_mad_snoop_private *mad_snoop_priv;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
{
	struct ib_mad_snoop_private *mad_snoop_priv;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
			 u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

		ret->mad_size = mad_size;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			dev_err(&device->dev, "OPA Invalid directed route\n");
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		    smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
			dev_err(&device->dev, "Invalid directed route\n");
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		}
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;

	local->mad_send_wr = mad_send_wr;
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	}
	return 0;
}
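/*
 * Example of the padding math above: with a 256-byte MAD and a 56-byte
 * header, seg_size is 200; data_len of 430 gives 430 % 200 = 30, so
 * pad = 170 and the last RMPP segment is filled out to a full 200 bytes.
 * When data_len is an exact multiple of seg_size, pad is 0.
 */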
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
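/*
 * "Kernel RMPP" means the agent registered a non-zero rmpp_version
 * without IB_MAD_USER_RMPP, so the core performs RMPP segmentation and
 * reassembly on the agent's behalf; with IB_MAD_USER_RMPP the registrant
 * handles RMPP itself.
 */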
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int hdr_len, int data_len,
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
			return ERR_PTR(ret);

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
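/*
 * Typical send-side usage (illustrative sketch only; the variables shown
 * are hypothetical):
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *	... fill in msg->mad ...
 *	if (ib_post_send_mad(msg, NULL))
 *		ib_free_send_mad(msg);
 */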
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
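/*
 * RMPP segments are numbered starting at 1 (seg->num is assigned from
 * ++seg_count in alloc_send_rmpp_list()), and cur_seg caches the last
 * segment visited so the list walk above starts from the current
 * position rather than the head.
 */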
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_mad_agent *mad_agent;
	unsigned long flags;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
		list = &qp_info->send_queue.list;
	} else {
		list = &qp_info->overflow_list;
	}

		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);

		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 * with the registered client.
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {
		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		ret = ib_mad_enforce_security(mad_agent_priv,
					      mad_send_wr->send_wr.pkey_index);

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
			if (ret < 0)		/* error */
			else if (ret == 1)	/* locally consumed */
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);

			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
	}

		*bad_send_buf = send_buf;
}
EXPORT_SYMBOL(ib_post_send_mad);
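/*
 * Each posted send holds one reference for the work request on the QP
 * and, if a timeout was set, a second reference for the expected
 * response (mad_send_wr->refcount above); the agent itself is also
 * referenced until the send completes.
 */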
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 * a MAD to the access layer.
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
}
EXPORT_SYMBOL(ib_process_mad_wc);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
		}
	}
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	return (*method) ? 0 : (-ENOMEM);
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
{
	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
	} else {
		method = &(*class)->method_table[mgmt_class];
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
	}
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			/* Allocate method table for this OUI */
				ret = allocate_method_table(method);
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
	}
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
		*vendor_table = NULL;
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;

	method = class->method_table[mgmt_class];
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}

	if (!is_vendor_class(mgmt_class))

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	vendor_class = vendor->vendor_class[mgmt_class];
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
			method = vendor_class->method_table[index];
				/* Remove any methods for this mad agent */
				remove_methods_mad_agent(method, agent_priv);
				/*
				 * Now, check to see if there are
				 * any methods still in use
				 */
				if (!check_method_table(method)) {
					/* If not, release management method table */
					vendor_class->method_table[index] = NULL;
					memset(vendor_class->oui[index], 0, 3);
					/* Any OUIs left ? */
					if (!check_vendor_class(vendor_class)) {
						/* If not, release vendor class table */
						kfree(vendor_class);
						vendor->vendor_class[mgmt_class] = NULL;
						/* Any other vendor classes left ? */
						if (!check_vendor_table(vendor)) {
								agent_priv->reg_req->
								mgmt_class_version].
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	if (ib_response_mad(mad_hdr)) {
		/*
		 * Routing is based on high 32 bits of transaction ID
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		mad_agent = idr_find(&ib_mad_clients, hi_tid);
		if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;

		spin_lock_irqsave(&port_priv->reg_lock, flags);
		/*
		 * Routing is based on version, class, and method.
		 * For "newer" vendor MADs, also based on OUI.
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    ARRAY_SIZE(class->method_table))
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			method = vendor_class->method_table[index];
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		}
			atomic_inc(&mad_agent->refcount);
		spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	}

	if (mad_agent && !mad_agent->agent.recv_handler) {
		dev_notice(&port_priv->device->dev,
			   "No receive handler for client %p on port %d\n",
			   &mad_agent->agent, port_priv->port_num);
		deref_mad_agent(mad_agent);
	}
static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
{
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %d %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
		/* Filter GSI packets sent to QP0 */
	}
}

static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
		  IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct rdma_ah_attr attr;
	u8 send_resp, rcv_resp;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */

	if (rdma_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */

	has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
	if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not. Assume different */

	if (!send_resp && rcv_resp) {
		/* is request/response. */
			if (ib_get_cached_lmc(device, port_num, &lmc))
			return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
					   rwc->wc->dlid_path_bits) &
		} else {
			const struct ib_global_route *grh =
					rdma_ah_read_grh(&attr);

			if (rdma_query_gid(device, port_num,
					   grh->sgid_index, &sgid))
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
		}
	}

		return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
	else
		return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
			       rwc->recv_buf.grh->sgid.raw,
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}
struct ib_mad_send_wr_private*
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->done_list);
}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	ret = ib_mad_enforce_security(mad_agent_priv,
				      mad_recv_wc->wc->pkey_index);
		ib_free_recv_mad(mad_recv_wc);
		deref_mad_agent(mad_agent_priv);

	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
			deref_mad_agent(mad_agent_priv);
	}

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent, NULL,
				atomic_dec(&mad_agent_priv->refcount);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad
				 */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_agent_priv->agent.recv_handler(
					&mad_agent_priv->agent,
					&mad_send_wr->send_buf,
			atomic_dec(&mad_agent_priv->refcount);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
		deref_mad_agent(mad_agent_priv);
	}
}
static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_priv->device->phys_port_cnt) ==
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}
static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
							(struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}
static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				       rdma_cap_ib_switch(port_priv->device),
				       port_priv->device->phys_port_cnt) ==
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					       rdma_cap_ib_switch(port_priv->device),
					       port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}
static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SM_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}
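/*
 * ib_mad_recv_done() - completion handler for the MAD receive queue.
 * Unmaps the buffer, validates the MAD, gives the SMI code and then the
 * driver's ->process_mad() a chance to consume or answer it, and
 * otherwise dispatches it to the matching agent found by
 * find_mad_agent().  A replacement receive WR is always reposted.
 */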
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
2229 struct ib_mad_port_private *port_priv = cq->cq_context;
2230 struct ib_mad_list_head *mad_list =
2231 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2232 struct ib_mad_qp_info *qp_info;
2233 struct ib_mad_private_header *mad_priv_hdr;
2234 struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;
	}
2253 qp_info = mad_list->mad_queue->qp_info;
2254 dequeue_mad(mad_list);
2256 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2257 qp_info->port_priv->port_num);
	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
2261 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2262 ib_dma_unmap_single(port_priv->device,
2263 recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);
2267 /* Setup MAD receive work completion from "normal" work completion */
2268 recv->header.wc = *wc;
2269 recv->header.recv_wc.wc = &recv->header.wc;
2271 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2272 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}
2279 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2280 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2282 if (atomic_read(&qp_info->snoop_count))
2283 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response)
		goto out;

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
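/*
 * adjust_timeout() - called with the agent lock held; re-arm (or cancel)
 * the agent's delayed work so that it fires when the request at the head
 * of the wait list is due to time out.
 */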
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
2385 struct ib_mad_agent_private *mad_agent_priv;
2386 struct ib_mad_send_wr_private *temp_mad_send_wr;
2387 struct list_head *list_item;
2388 unsigned long delay;
2390 mad_agent_priv = mad_send_wr->mad_agent_priv;
2391 list_del(&mad_send_wr->agent_list);
2393 delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	}
	else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);
2410 /* Reschedule a work item if we have a shorter timeout */
2411 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2412 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  unsigned long timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
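/*
 * ib_mad_send_done() - completion handler for the MAD send queue: unmap
 * the finished request, promote one entry from the overflow list if the
 * hardware queue has room, complete the send, and repost the promoted
 * entry (routing any ib_post_send() failure back through the error path).
 */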
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
2478 struct ib_mad_port_private *port_priv = cq->cq_context;
2479 struct ib_mad_list_head *mad_list =
2480 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2481 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2482 struct ib_mad_qp_info *qp_info;
2483 struct ib_mad_queue *send_queue;
2484 struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
2502 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2503 mad_send_wr->header_mapping,
2504 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2505 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2506 mad_send_wr->payload_mapping,
2507 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2508 queued_send_wr = NULL;
2509 spin_lock_irqsave(&send_queue->lock, flags);
2510 list_del(&mad_list->list);
2512 /* Move queued send to the send queue */
2513 if (send_queue->count-- > send_queue->max_active) {
2514 mad_list = container_of(qp_info->overflow_list.next,
2515 struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					      struct ib_mad_send_wr_private,
					      mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
2521 spin_unlock_irqrestore(&send_queue->lock, flags);
2523 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2524 mad_send_wc.status = wc->status;
2525 mad_send_wc.vendor_err = wc->vendor_err;
2526 if (atomic_read(&qp_info->snoop_count))
2527 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2528 IB_MAD_SNOOP_SEND_COMPLETIONS);
2529 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
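/*
 * After a send error has moved the QP out of RTS, mark_sends_for_retry()
 * flags every send still on the queue so that ib_mad_send_error() will
 * repost it (rather than fail it) when its flush completion arrives.
 */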
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;
	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
2575 if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   NULL);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;
2587 /* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
2609 unsigned long flags;
2610 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2611 struct ib_mad_send_wc mad_send_wc;
2612 struct list_head cancel_list;
2614 INIT_LIST_HEAD(&cancel_list);
2616 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2617 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2618 &mad_agent_priv->send_list, agent_list) {
2619 if (mad_send_wr->status == IB_WC_SUCCESS) {
2620 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

2625 /* Empty wait list to prevent receives from finding a request */
2626 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2627 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2629 /* Report all cancelled requests */
2630 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2631 mad_send_wc.vendor_err = 0;
2633 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2634 &cancel_list, agent_list) {
2635 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2636 list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
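/*
 * find_send_wr() - look a send buffer up in the agent's wait and send
 * lists.  Used by ib_modify_mad() to locate the request being changed;
 * must be called with the agent lock held.
 */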
static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
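/*
 * Usage sketch (illustrative only, not part of the original file): a
 * client that posted a request MAD can abort it or shorten its timeout.
 * The send_handler still runs for a cancelled request, reporting
 * IB_WC_WR_FLUSH_ERR.  "my_agent" and "my_send_buf" are hypothetical
 * names standing in for the client's ib_mad_agent and ib_mad_send_buf.
 *
 *	ib_cancel_mad(my_agent, my_send_buf);		// give up now
 *
 *	if (ib_modify_mad(my_agent, my_send_buf, 100))	// or allow 100 ms more
 *		pr_debug("request already completed or not found\n");
 */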
static void local_completions(struct work_struct *work)
{
2708 struct ib_mad_agent_private *mad_agent_priv;
2709 struct ib_mad_local_private *local;
2710 struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);
2720 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2721 mad_agent_priv->qp_info->port_priv->port_num);
2723 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2724 while (!list_empty(&mad_agent_priv->local_list)) {
2725 local = list_entry(mad_agent_priv->local_list.next,
2726 struct ib_mad_local_private,
2728 list_del(&local->completion_list);
2729 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2731 if (local->mad_priv) {
2733 recv_mad_agent = local->recv_mad_agent;
2734 if (!recv_mad_agent) {
2735 dev_err(&mad_agent_priv->agent.device->dev,
2736 "No receive MAD agent for local completion\n");
2738 goto local_send_completion;
2742 * Defined behavior is to complete response
2745 build_smp_wc(recv_mad_agent->agent.qp,
2746 local->mad_send_wr->send_wr.wr.wr_cqe,
2747 be16_to_cpu(IB_LID_PERMISSIVE),
2748 local->mad_send_wr->send_wr.pkey_index,
2749 recv_mad_agent->agent.port_num, &wc);
2751 local->mad_priv->header.recv_wc.wc = &wc;
2753 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2754 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2755 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2756 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2758 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2759 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2762 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2763 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2764 &local->mad_priv->header.recv_wc.rmpp_list);
2765 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2766 local->mad_priv->header.recv_wc.recv_buf.mad =
2767 (struct ib_mad *)local->mad_priv->mad;
2768 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2769 snoop_recv(recv_mad_agent->qp_info,
2770 &local->mad_priv->header.recv_wc,
2771 IB_MAD_SNOOP_RECVS);
2772 recv_mad_agent->agent.recv_handler(
2773 &recv_mad_agent->agent,
2774 &local->mad_send_wr->send_buf,
2775 &local->mad_priv->header.recv_wc);
2776 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2777 atomic_dec(&recv_mad_agent->refcount);
2778 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2781 local_send_completion:
2783 mad_send_wc.status = IB_WC_SUCCESS;
2784 mad_send_wc.vendor_err = 0;
2785 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2786 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2787 snoop_send(mad_agent_priv->qp_info,
2788 &local->mad_send_wr->send_buf,
2789 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2790 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2793 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2794 atomic_dec(&mad_agent_priv->refcount);
2796 kfree(local->mad_priv);
2799 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;
2809 mad_send_wr->retries_left--;
2810 mad_send_wr->send_buf.retries++;
2812 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
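/*
 * timeout_sends() - delayed-work handler: walk the wait list, retry any
 * request whose timeout has expired, and report IB_WC_RESP_TIMEOUT_ERR
 * (or the recorded error status) to the client when no retry is made.
 */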
static void timeout_sends(struct work_struct *work)
{
2840 struct ib_mad_agent_private *mad_agent_priv;
2841 struct ib_mad_send_wr_private *mad_send_wr;
2842 struct ib_mad_send_wc mad_send_wc;
2843 unsigned long flags, delay;
	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;
2849 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2850 while (!list_empty(&mad_agent_priv->wait_list)) {
2851 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;
2870 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
2889 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2890 struct ib_mad_private *mad)
2892 unsigned long flags;
2894 struct ib_mad_private *mad_priv;
2895 struct ib_sge sg_list;
2896 struct ib_recv_wr recv_wr;
2897 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2899 /* Initialize common scatter list fields */
2900 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2902 /* Initialize common receive WR fields */
2903 recv_wr.next = NULL;
2904 recv_wr.sg_list = &sg_list;
2905 recv_wr.num_sge = 1;
2908 /* Allocate and map receive buffer */
2913 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2920 sg_list.length = mad_priv_dma_size(mad_priv);
2921 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2923 mad_priv_dma_size(mad_priv),
2925 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2930 mad_priv->header.mapping = sg_list.addr;
2931 mad_priv->header.mad_list.mad_queue = recv_queue;
2932 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2933 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2935 /* Post receive WR */
2936 spin_lock_irqsave(&recv_queue->lock, flags);
2937 post = (++recv_queue->count < recv_queue->max_active);
2938 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2939 spin_unlock_irqrestore(&recv_queue->lock, flags);
2940 ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
2942 spin_lock_irqsave(&recv_queue->lock, flags);
2943 list_del(&mad_priv->header.mad_list.list);
2944 recv_queue->count--;
2945 spin_unlock_irqrestore(&recv_queue->lock, flags);
2946 ib_dma_unmap_single(qp_info->port_priv->device,
2947 mad_priv->header.mapping,
2948 mad_priv_dma_size(mad_priv),
2951 dev_err(&qp_info->port_priv->device->dev,
2952 "ib_post_recv failed: %d\n", ret);
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}
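/*
 * ib_mad_port_start() - bring both special QPs through the
 * INIT -> RTR -> RTS transitions, request CQ notification, and post the
 * initial receive work requests for each QP.
 */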
2998 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
3001 struct ib_qp_attr *attr;
3005 attr = kmalloc(sizeof *attr, GFP_KERNEL);
3009 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
3010 IB_DEFAULT_PKEY_FULL, &pkey_index);
3014 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3015 qp = port_priv->qp_info[i].qp;
3020 * PKey index for QP1 is irrelevant but
3021 * one is needed for the Reset to Init transition
3023 attr->qp_state = IB_QPS_INIT;
3024 attr->pkey_index = pkey_index;
3025 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
3026 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
3027 IB_QP_PKEY_INDEX | IB_QP_QKEY);
3029 dev_err(&port_priv->device->dev,
3030 "Couldn't change QP%d state to INIT: %d\n",
3035 attr->qp_state = IB_QPS_RTR;
3036 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
3038 dev_err(&port_priv->device->dev,
3039 "Couldn't change QP%d state to RTR: %d\n",
3044 attr->qp_state = IB_QPS_RTS;
3045 attr->sq_psn = IB_MAD_SEND_Q_PSN;
3046 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3048 dev_err(&port_priv->device->dev,
3049 "Couldn't change QP%d state to RTS: %d\n",
3055 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3057 dev_err(&port_priv->device->dev,
3058 "Failed to request completion notification: %d\n",
3063 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3064 if (!port_priv->qp_info[i].qp)
3067 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3069 dev_err(&port_priv->device->dev,
3070 "Couldn't post receive WRs\n");
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
3098 static void init_mad_qp(struct ib_mad_port_private *port_priv,
3099 struct ib_mad_qp_info *qp_info)
3101 qp_info->port_priv = port_priv;
3102 init_mad_queue(qp_info, &qp_info->send_queue);
3103 init_mad_queue(qp_info, &qp_info->recv_queue);
3104 INIT_LIST_HEAD(&qp_info->overflow_list);
3105 spin_lock_init(&qp_info->snoop_lock);
3106 qp_info->snoop_table = NULL;
3107 qp_info->snoop_table_size = 0;
3108 atomic_set(&qp_info->snoop_count, 0);
3111 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3112 enum ib_qp_type qp_type)
3114 struct ib_qp_init_attr qp_init_attr;
3117 memset(&qp_init_attr, 0, sizeof qp_init_attr);
3118 qp_init_attr.send_cq = qp_info->port_priv->cq;
3119 qp_init_attr.recv_cq = qp_info->port_priv->cq;
3120 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3121 qp_init_attr.cap.max_send_wr = mad_sendq_size;
3122 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3123 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3124 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3125 qp_init_attr.qp_type = qp_type;
3126 qp_init_attr.port_num = qp_info->port_priv->port_num;
3127 qp_init_attr.qp_context = qp_info;
3128 qp_init_attr.event_handler = qp_event_handler;
3129 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3130 if (IS_ERR(qp_info->qp)) {
3131 dev_err(&qp_info->port_priv->device->dev,
3132 "Couldn't create ib_mad QP%d\n",
3133 get_spl_qp_index(qp_type));
3134 ret = PTR_ERR(qp_info->qp);
3137 /* Use minimum queue sizes unless the CQ is resized */
3138 qp_info->send_queue.max_active = mad_sendq_size;
3139 qp_info->recv_queue.max_active = mad_recvq_size;
3146 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3151 ib_destroy_qp(qp_info->qp);
3152 kfree(qp_info->snoop_table);
3157 * Create the QP, PD, MR, and CQ if needed
3159 static int ib_mad_port_open(struct ib_device *device,
3163 struct ib_mad_port_private *port_priv;
3164 unsigned long flags;
3165 char name[sizeof "ib_mad123"];
3168 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3171 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3172 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3175 /* Create new device info */
3176 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
3180 port_priv->device = device;
3181 port_priv->port_num = port_num;
3182 spin_lock_init(&port_priv->reg_lock);
3183 init_mad_qp(port_priv, &port_priv->qp_info[0]);
3184 init_mad_qp(port_priv, &port_priv->qp_info[1]);
3186 cq_size = mad_sendq_size + mad_recvq_size;
3187 has_smi = rdma_cap_ib_smi(device, port_num);
3191 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3192 IB_POLL_UNBOUND_WORKQUEUE);
3193 if (IS_ERR(port_priv->cq)) {
3194 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3195 ret = PTR_ERR(port_priv->cq);
3199 port_priv->pd = ib_alloc_pd(device, 0);
3200 if (IS_ERR(port_priv->pd)) {
3201 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3202 ret = PTR_ERR(port_priv->pd);
3207 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3211 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3215 snprintf(name, sizeof name, "ib_mad%d", port_num);
3216 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3217 if (!port_priv->wq) {
3222 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3223 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3224 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3226 ret = ib_mad_port_start(port_priv);
3228 dev_err(&device->dev, "Couldn't start port\n");
3235 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3236 list_del_init(&port_priv->port_list);
3237 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3239 destroy_workqueue(port_priv->wq);
3241 destroy_mad_qp(&port_priv->qp_info[1]);
3243 destroy_mad_qp(&port_priv->qp_info[0]);
3245 ib_dealloc_pd(port_priv->pd);
3247 ib_free_cq(port_priv->cq);
3248 cleanup_recv_queue(&port_priv->qp_info[1]);
3249 cleanup_recv_queue(&port_priv->qp_info[0]);
3258 * If there are no classes using the port, free the port
3259 * resources (CQ, MR, PD, QP) and remove the port's info structure
3261 static int ib_mad_port_close(struct ib_device *device, int port_num)
3263 struct ib_mad_port_private *port_priv;
3264 unsigned long flags;
3266 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3267 port_priv = __ib_get_mad_port(device, port_num);
3268 if (port_priv == NULL) {
3269 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3270 dev_err(&device->dev, "Port %d not found\n", port_num);
3273 list_del_init(&port_priv->port_list);
3274 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3276 destroy_workqueue(port_priv->wq);
3277 destroy_mad_qp(&port_priv->qp_info[1]);
3278 destroy_mad_qp(&port_priv->qp_info[0]);
3279 ib_dealloc_pd(port_priv->pd);
3280 ib_free_cq(port_priv->cq);
3281 cleanup_recv_queue(&port_priv->qp_info[1]);
3282 cleanup_recv_queue(&port_priv->qp_info[0]);
3283 /* XXX: Handle deallocation of MAD registration tables */
3290 static void ib_mad_init_device(struct ib_device *device)
3294 start = rdma_start_port(device);
3296 for (i = start; i <= rdma_end_port(device); i++) {
3297 if (!rdma_cap_ib_mad(device, i))
3300 if (ib_mad_port_open(device, i)) {
3301 dev_err(&device->dev, "Couldn't open port %d\n", i);
3304 if (ib_agent_port_open(device, i)) {
3305 dev_err(&device->dev,
3306 "Couldn't open port %d for agents\n", i);
3313 if (ib_mad_port_close(device, i))
3314 dev_err(&device->dev, "Couldn't close port %d\n", i);
3317 while (--i >= start) {
3318 if (!rdma_cap_ib_mad(device, i))
3321 if (ib_agent_port_close(device, i))
3322 dev_err(&device->dev,
3323 "Couldn't close port %d for agents\n", i);
3324 if (ib_mad_port_close(device, i))
3325 dev_err(&device->dev, "Couldn't close port %d\n", i);
3329 static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3333 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
3334 if (!rdma_cap_ib_mad(device, i))
3337 if (ib_agent_port_close(device, i))
3338 dev_err(&device->dev,
3339 "Couldn't close port %d for agents\n", i);
3340 if (ib_mad_port_close(device, i))
3341 dev_err(&device->dev, "Couldn't close port %d\n", i);
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
int ib_mad_init(void)
{
3353 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3354 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3356 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3357 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3359 INIT_LIST_HEAD(&ib_mad_port_list);
3361 /* Client ID 0 is used for snoop-only clients */
3362 idr_alloc(&ib_mad_clients, NULL, 0, 0, GFP_KERNEL);
	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}
void ib_mad_cleanup(void)
{
	ib_unregister_client(&mad_client);
}