/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/random.h>
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};
50 #define MLX4_TUN_SEND_WRID_SHIFT 34
51 #define MLX4_TUN_QPN_SHIFT 32
52 #define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
53 #define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)
55 #define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
56 #define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
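
/*
 * Tunnel work request IDs pack the buffer ring index in the low 32 bits,
 * the proxy QP type (0 = SMI, 1 = GSI) in bits 32-33, and a "this is a
 * receive" flag in bit 34, so completions from the shared CQ can be
 * demultiplexed with the macros above.
 */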
58 /* Port mgmt change event handling */
60 #define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
61 #define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
62 #define NUM_IDX_IN_PKEY_TBL_BLK 32
63 #define GUID_TBL_ENTRY_SIZE 8 /* size in bytes */
64 #define GUID_TBL_BLK_NUM_ENTRIES 8
65 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
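
/*
 * Port management change EQEs report table updates as a block pointer
 * plus a per-entry change mask; P_Key blocks hold 32 indexes and GUID
 * blocks hold 8 entries of 8 bytes each, as defined above.
 */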
/* Counters should saturate once they reach their maximum value */
#define ASSIGN_32BIT_COUNTER(counter, value) do {	\
	if ((value) > U32_MAX)				\
		counter = cpu_to_be32(U32_MAX);		\
	else						\
		counter = cpu_to_be32(value);		\
} while (0)
struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;
96 static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
97 static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
98 static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
99 int block, u32 change_bitmap);
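
/* Generate a node GUID for a slave: the OpenIB OUI with a random 32-bit suffix. */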
__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
	return cpu_to_be64(NODE_GUID_HI | prandom_u32());
}
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
		cpu_to_be64(0xff00000000000000LL);
}
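
/*
 * mlx4_MAD_IFC executes a MAD via the MAD_IFC firmware command: the MAD
 * (and, when available, the receive WC/GRH context used for key-check
 * traps) is passed in a command mailbox and the response MAD is copied
 * back from the output mailbox.
 */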
113 int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
114 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
115 void *in_mad, void *response_mad)
117 struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
120 u32 in_modifier = port;
123 inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
124 if (IS_ERR(inmailbox))
125 return PTR_ERR(inmailbox);
126 inbox = inmailbox->buf;
128 outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
129 if (IS_ERR(outmailbox)) {
130 mlx4_free_cmd_mailbox(dev->dev, inmailbox);
131 return PTR_ERR(outmailbox);
134 memcpy(inbox, in_mad, 256);
	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
140 if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
142 if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
144 if (mlx4_is_mfunc(dev->dev) &&
145 (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
161 memset(inbox + 256, 0, 256);
162 ext_info = inbox + 256;
164 ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
165 ext_info->rqpn = cpu_to_be32(in_wc->src_qp);
166 ext_info->sl = in_wc->sl << 4;
167 ext_info->g_path = in_wc->dlid_path_bits |
168 (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
169 ext_info->pkey = cpu_to_be16(in_wc->pkey_index);
172 memcpy(ext_info->grh, in_grh, 40);
176 in_modifier |= in_wc->slid << 16;
179 err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
180 mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
181 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
182 (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);
185 memcpy(response_mad, outmailbox->buf, 256);
187 mlx4_free_cmd_mailbox(dev->dev, inmailbox);
188 mlx4_free_cmd_mailbox(dev->dev, outmailbox);
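
/* Rebuild the cached address handle used to send MADs to the (new) SM. */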
193 static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
195 struct ib_ah *new_ah;
196 struct ib_ah_attr ah_attr;
199 if (!dev->send_agent[port_num - 1][0])
202 memset(&ah_attr, 0, sizeof ah_attr);
205 ah_attr.port_num = port_num;
207 new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
212 spin_lock_irqsave(&dev->sm_lock, flags);
213 if (dev->sm_ah[port_num - 1])
214 ib_destroy_ah(dev->sm_ah[port_num - 1]);
215 dev->sm_ah[port_num - 1] = new_ah;
216 spin_unlock_irqrestore(&dev->sm_lock, flags);
/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
223 static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
226 struct ib_port_info *pinfo;
229 u32 bn, pkey_change_bitmap;
233 struct mlx4_ib_dev *dev = to_mdev(ibdev);
234 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
235 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
236 mad->mad_hdr.method == IB_MGMT_METHOD_SET)
237 switch (mad->mad_hdr.attr_id) {
238 case IB_SMP_ATTR_PORT_INFO:
239 pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
240 lid = be16_to_cpu(pinfo->lid);
242 update_sm_ah(dev, port_num,
243 be16_to_cpu(pinfo->sm_lid),
244 pinfo->neighbormtu_mastersmsl & 0xf);
246 if (pinfo->clientrereg_resv_subnetto & 0x80)
247 handle_client_rereg_event(dev, port_num);
250 handle_lid_change_event(dev, port_num);
253 case IB_SMP_ATTR_PKEY_TABLE:
254 if (!mlx4_is_mfunc(dev->dev)) {
255 mlx4_ib_dispatch_event(dev, port_num,
256 IB_EVENT_PKEY_CHANGE);
			/* At this point, we are running in the master.
			 * Slaves do not receive SMPs.
			 */
263 bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
264 base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
265 pkey_change_bitmap = 0;
266 for (i = 0; i < 32; i++) {
267 pr_debug("PKEY[%d] = x%x\n",
268 i + bn*32, be16_to_cpu(base[i]));
269 if (be16_to_cpu(base[i]) !=
270 dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
271 pkey_change_bitmap |= (1 << i);
272 dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
273 be16_to_cpu(base[i]);
276 pr_debug("PKEY Change event: port=%d, "
277 "block=0x%x, change_bitmap=0x%x\n",
278 port_num, bn, pkey_change_bitmap);
280 if (pkey_change_bitmap) {
281 mlx4_ib_dispatch_event(dev, port_num,
282 IB_EVENT_PKEY_CHANGE);
283 if (!dev->sriov.is_going_down)
284 __propagate_pkey_ev(dev, port_num, bn,
289 case IB_SMP_ATTR_GUID_INFO:
290 /* paravirtualized master's guid is guid 0 -- does not change */
291 if (!mlx4_is_master(dev->dev))
292 mlx4_ib_dispatch_event(dev, port_num,
293 IB_EVENT_GID_CHANGE);
294 /*if master, notify relevant slaves*/
295 if (mlx4_is_master(dev->dev) &&
296 !dev->sriov.is_going_down) {
297 bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
298 mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
299 (u8 *)(&((struct ib_smp *)mad)->data));
300 mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
301 (u8 *)(&((struct ib_smp *)mad)->data));
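
/*
 * Propagate a P_Key change to the slaves: for every active slave whose
 * virtual P_Key table maps an index in the changed block, generate a
 * P_Key change event on that slave.
 */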
310 static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
311 int block, u32 change_bitmap)
313 int i, ix, slave, err;
316 for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
317 if (slave == mlx4_master_func_num(dev->dev))
319 if (!mlx4_is_slave_active(dev->dev, slave))
323 for (i = 0; i < 32; i++) {
324 if (!(change_bitmap & (1 << i)))
327 ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
328 if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
329 [ix] == i + 32 * block) {
330 err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
331 pr_debug("propagate_pkey_ev: slave %d,"
332 " port %d, ix %d (%d)\n",
333 slave, port_num, ix, err);
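
/* Replace the NodeDescription in SM GetResp MADs with this device's node_desc. */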
344 static void node_desc_override(struct ib_device *dev,
349 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
350 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
351 mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
352 mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
353 spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
354 memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
355 spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
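
/*
 * Forward an unsolicited trap to the SM using the port's MAD agent and
 * the cached SM address handle.
 */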
359 static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
361 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
362 struct ib_mad_send_buf *send_buf;
363 struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
368 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
369 IB_MGMT_MAD_DATA, GFP_ATOMIC);
370 if (IS_ERR(send_buf))
	/*
	 * We rely here on the fact that MLX QPs don't use the
	 * address handle after the send is posted (this is
	 * wrong following the IB spec strictly, but we know
	 * it's OK for our devices).
	 */
378 spin_lock_irqsave(&dev->sm_lock, flags);
379 memcpy(send_buf->mad, mad, sizeof *mad);
380 if ((send_buf->ah = dev->sm_ah[port_num - 1]))
381 ret = ib_post_send_mad(send_buf, NULL);
384 spin_unlock_irqrestore(&dev->sm_lock, flags);
387 ib_free_send_mad(send_buf);
391 static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
392 struct ib_sa_mad *sa_mad)
396 /* dispatch to different sa handlers */
397 switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
398 case IB_SA_ATTR_MC_MEMBER_REC:
399 ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
407 int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
409 struct mlx4_ib_dev *dev = to_mdev(ibdev);
412 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
413 if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
420 static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
421 u8 port, u16 pkey, u16 *ix)
424 u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
427 if (slave == mlx4_master_func_num(dev->dev))
428 return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);
430 unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;
432 for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
433 if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
436 pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];
438 ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
441 if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
442 if (slot_pkey & 0x8000) {
446 /* take first partial pkey index found */
447 if (partial_ix == 0xFF)
448 partial_ix = pkey_ix;
453 if (partial_ix < 0xFF) {
454 *ix = (u16) partial_ix;
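
/*
 * Tunnel a MAD arriving from the wire to the given slave: wrap the MAD
 * and its GRH in a mlx4_rcv_tunnel_mad, fill in the tunnel header and
 * post it on the master's tunnel QP towards that slave's proxy QP.
 */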
461 int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
462 enum ib_qp_type dest_qpt, struct ib_wc *wc,
463 struct ib_grh *grh, struct ib_mad *mad)
466 struct ib_send_wr wr, *bad_wr;
467 struct mlx4_ib_demux_pv_ctx *tun_ctx;
468 struct mlx4_ib_demux_pv_qp *tun_qp;
469 struct mlx4_rcv_tunnel_mad *tun_mad;
470 struct ib_ah_attr attr;
472 struct ib_qp *src_qp = NULL;
473 unsigned tun_tx_ix = 0;
478 u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
480 if (dest_qpt > IB_QPT_GSI)
483 tun_ctx = dev->sriov.demux[port-1].tun[slave];
485 /* check if proxy qp created */
486 if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
490 tun_qp = &tun_ctx->qp[0];
492 tun_qp = &tun_ctx->qp[1];
494 /* compute P_Key index to put in tunnel header for slave */
497 ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
501 ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
504 tun_pkey_ix = pkey_ix;
506 tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
508 dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;
510 /* get tunnel tx data buf for slave */
513 /* create ah. Just need an empty one with the port num for the post send.
514 * The driver will set the force loopback bit in post_send */
515 memset(&attr, 0, sizeof attr);
516 attr.port_num = port;
518 memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
519 attr.ah_flags = IB_AH_GRH;
521 ah = ib_create_ah(tun_ctx->pd, &attr);
525 /* allocate tunnel tx buf after pass failure returns */
526 spin_lock(&tun_qp->tx_lock);
527 if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
528 (MLX4_NUM_TUNNEL_BUFS - 1))
531 tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
532 spin_unlock(&tun_qp->tx_lock);
536 tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
537 if (tun_qp->tx_ring[tun_tx_ix].ah)
538 ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
539 tun_qp->tx_ring[tun_tx_ix].ah = ah;
540 ib_dma_sync_single_for_cpu(&dev->ib_dev,
541 tun_qp->tx_ring[tun_tx_ix].buf.map,
542 sizeof (struct mlx4_rcv_tunnel_mad),
545 /* copy over to tunnel buffer */
547 memcpy(&tun_mad->grh, grh, sizeof *grh);
548 memcpy(&tun_mad->mad, mad, sizeof *mad);
550 /* adjust tunnel data */
551 tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
552 tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
553 tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;
557 if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
560 if (vlan != wc->vlan_id)
			/* Packet vlan is not the VST-assigned vlan.
			 * Drop the packet.
			 */
			/* Remove the vlan tag before forwarding
			 * the packet to the VF.
			 */
574 tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
575 memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
576 memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
578 tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
579 tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
582 ib_dma_sync_single_for_device(&dev->ib_dev,
583 tun_qp->tx_ring[tun_tx_ix].buf.map,
584 sizeof (struct mlx4_rcv_tunnel_mad),
587 list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
588 list.length = sizeof (struct mlx4_rcv_tunnel_mad);
589 list.lkey = tun_ctx->mr->lkey;
592 wr.wr.ud.port_num = port;
593 wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
594 wr.wr.ud.remote_qpn = dqpn;
596 wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
599 wr.opcode = IB_WR_SEND;
600 wr.send_flags = IB_SEND_SIGNALED;
602 ret = ib_post_send(src_qp, &wr, &bad_wr);
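
/*
 * Demultiplex a MAD received from the wire: decide which slave should
 * get it (by GRH/GID, encoded TID or management class) and tunnel it to
 * that slave.
 */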
609 static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
610 struct ib_wc *wc, struct ib_grh *grh,
613 struct mlx4_ib_dev *dev = to_mdev(ibdev);
619 if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
625 if (!(wc->wc_flags & IB_WC_GRH)) {
626 mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
629 if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
630 mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
633 if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
634 mlx4_ib_warn(ibdev, "failed matching grh\n");
637 if (slave >= dev->dev->caps.sqp_demux) {
638 mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
639 slave, dev->dev->caps.sqp_demux);
643 if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
646 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
648 pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
653 /* Initially assume that this mad is for us */
654 slave = mlx4_master_func_num(dev->dev);
656 /* See if the slave id is encoded in a response mad */
	if (mad->mad_hdr.method & 0x80) {
		slave_id = (u8 *) &mad->mad_hdr.tid;
		slave = *slave_id;
		if (slave != 255) /* 255 indicates the dom0 */
			*slave_id = 0; /* remap tid */
	}
664 /* If a grh is present, we demux according to it */
665 if (wc->wc_flags & IB_WC_GRH) {
666 slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
668 mlx4_ib_warn(ibdev, "failed matching grh\n");
672 /* Class-specific handling */
673 switch (mad->mad_hdr.mgmt_class) {
674 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
675 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
676 /* 255 indicates the dom0 */
677 if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
678 if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
			/* For a VF, drop unsolicited MADs */
681 if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
682 mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
683 slave, mad->mad_hdr.mgmt_class,
684 mad->mad_hdr.method);
689 case IB_MGMT_CLASS_SUBN_ADM:
690 if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
691 (struct ib_sa_mad *) mad))
694 case IB_MGMT_CLASS_CM:
695 if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
698 case IB_MGMT_CLASS_DEVICE_MGMT:
699 if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
703 /* Drop unsupported classes for slaves in tunnel mode */
704 if (slave != mlx4_master_func_num(dev->dev)) {
705 pr_debug("dropping unsupported ingress mad from class:%d "
706 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
	/* Make sure an unresolved slave value (255 = dom0) did not slip through unhandled. */
711 if (slave >= dev->dev->caps.sqp_demux) {
712 mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
713 slave, dev->dev->caps.sqp_demux);
717 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
719 pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
724 static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
725 struct ib_wc *in_wc, struct ib_grh *in_grh,
726 struct ib_mad *in_mad, struct ib_mad *out_mad)
728 u16 slid, prev_lid = 0;
730 struct ib_port_attr pattr;
732 if (in_wc && in_wc->qp->qp_num) {
733 pr_debug("received MAD: slid:%d sqpn:%d "
734 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
735 in_wc->slid, in_wc->src_qp,
736 in_wc->dlid_path_bits,
739 in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
740 be16_to_cpu(in_mad->mad_hdr.attr_id));
741 if (in_wc->wc_flags & IB_WC_GRH) {
742 pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
743 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
744 be64_to_cpu(in_grh->sgid.global.interface_id));
745 pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
746 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
747 be64_to_cpu(in_grh->dgid.global.interface_id));
751 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
753 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
754 forward_trap(to_mdev(ibdev), port_num, in_mad);
755 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
758 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
759 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
760 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
761 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
762 in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
763 return IB_MAD_RESULT_SUCCESS;
		/* Don't process SMInfo queries -- the SMA can't handle them. */
768 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
769 return IB_MAD_RESULT_SUCCESS;
770 } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
771 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
772 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 ||
773 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
774 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
775 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
776 return IB_MAD_RESULT_SUCCESS;
778 return IB_MAD_RESULT_SUCCESS;
780 if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
781 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
782 in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
783 in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
784 !ib_query_port(ibdev, port_num, &pattr))
785 prev_lid = pattr.lid;
787 err = mlx4_MAD_IFC(to_mdev(ibdev),
788 (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
789 (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
790 MLX4_MAD_IFC_NET_VIEW,
791 port_num, in_wc, in_grh, in_mad, out_mad);
793 return IB_MAD_RESULT_FAILURE;
795 if (!out_mad->mad_hdr.status) {
796 if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
797 smp_snoop(ibdev, port_num, in_mad, prev_lid);
798 /* slaves get node desc from FW */
799 if (!mlx4_is_slave(to_mdev(ibdev)->dev))
800 node_desc_override(ibdev, out_mad);
803 /* set return bit in status of directed route responses */
804 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
805 out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
807 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
808 /* no response for trap repress */
809 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
811 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
814 static void edit_counter(struct mlx4_counter *cnt,
815 struct ib_pma_portcounters *pma_cnt)
817 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
818 (be64_to_cpu(cnt->tx_bytes) >> 2));
819 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
820 (be64_to_cpu(cnt->rx_bytes) >> 2));
821 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
822 be64_to_cpu(cnt->tx_frames));
823 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
824 be64_to_cpu(cnt->rx_frames));
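
/*
 * On RoCE ports the PMA counters are synthesized from the per-port
 * counters returned by QUERY_IF_STAT rather than taken from the SMA.
 */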
827 static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
828 struct ib_wc *in_wc, struct ib_grh *in_grh,
829 struct ib_mad *in_mad, struct ib_mad *out_mad)
831 struct mlx4_cmd_mailbox *mailbox;
832 struct mlx4_ib_dev *dev = to_mdev(ibdev);
834 u32 inmod = dev->counters[port_num - 1].index & 0xffff;
837 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
840 mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
842 return IB_MAD_RESULT_FAILURE;
844 err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
845 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
848 err = IB_MAD_RESULT_FAILURE;
850 memset(out_mad->data, 0, sizeof out_mad->data);
851 mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
852 switch (mode & 0xf) {
854 edit_counter(mailbox->buf,
855 (void *)(out_mad->data + 40));
856 err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
859 err = IB_MAD_RESULT_FAILURE;
863 mlx4_free_cmd_mailbox(dev->dev, mailbox);
868 int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
869 struct ib_wc *in_wc, struct ib_grh *in_grh,
870 struct ib_mad *in_mad, struct ib_mad *out_mad)
872 struct mlx4_ib_dev *dev = to_mdev(ibdev);
873 switch (rdma_port_get_link_layer(ibdev, port_num)) {
874 case IB_LINK_LAYER_INFINIBAND:
875 if (!mlx4_is_slave(dev->dev))
876 return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
877 in_grh, in_mad, out_mad);
878 case IB_LINK_LAYER_ETHERNET:
879 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
880 in_grh, in_mad, out_mad);
886 static void send_handler(struct ib_mad_agent *agent,
887 struct ib_mad_send_wc *mad_send_wc)
889 if (mad_send_wc->send_buf->context[0])
890 ib_destroy_ah(mad_send_wc->send_buf->context[0]);
891 ib_free_send_mad(mad_send_wc->send_buf);
894 int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
896 struct ib_mad_agent *agent;
899 enum rdma_link_layer ll;
901 for (p = 0; p < dev->num_ports; ++p) {
902 ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
903 for (q = 0; q <= 1; ++q) {
904 if (ll == IB_LINK_LAYER_INFINIBAND) {
905 agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
906 q ? IB_QPT_GSI : IB_QPT_SMI,
907 NULL, 0, send_handler,
910 ret = PTR_ERR(agent);
913 dev->send_agent[p][q] = agent;
915 dev->send_agent[p][q] = NULL;
922 for (p = 0; p < dev->num_ports; ++p)
923 for (q = 0; q <= 1; ++q)
924 if (dev->send_agent[p][q])
925 ib_unregister_mad_agent(dev->send_agent[p][q]);
930 void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
932 struct ib_mad_agent *agent;
935 for (p = 0; p < dev->num_ports; ++p) {
936 for (q = 0; q <= 1; ++q) {
937 agent = dev->send_agent[p][q];
939 dev->send_agent[p][q] = NULL;
940 ib_unregister_mad_agent(agent);
945 ib_destroy_ah(dev->sm_ah[p]);
949 static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
951 mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
953 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
954 mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
955 MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
958 static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
960 /* re-configure the alias-guid and mcg's */
961 if (mlx4_is_master(dev->dev)) {
962 mlx4_ib_invalidate_all_guid_record(dev, port_num);
964 if (!dev->sriov.is_going_down) {
965 mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
966 mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
967 MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
970 mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
973 static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
974 struct mlx4_eqe *eqe)
976 __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
977 GET_MASK_FROM_EQE(eqe));
980 static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
981 u32 guid_tbl_blk_num, u32 change_bitmap)
983 struct ib_smp *in_mad = NULL;
984 struct ib_smp *out_mad = NULL;
987 if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
990 in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
991 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
992 if (!in_mad || !out_mad) {
993 mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
997 guid_tbl_blk_num *= 4;
999 for (i = 0; i < 4; i++) {
1000 if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
1002 memset(in_mad, 0, sizeof *in_mad);
1003 memset(out_mad, 0, sizeof *out_mad);
1005 in_mad->base_version = 1;
1006 in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
1007 in_mad->class_version = 1;
1008 in_mad->method = IB_MGMT_METHOD_GET;
1009 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
1010 in_mad->attr_mod = cpu_to_be32(guid_tbl_blk_num + i);
1012 if (mlx4_MAD_IFC(dev,
1013 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
1014 port_num, NULL, NULL, in_mad, out_mad)) {
1015 mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
1019 mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
1021 (u8 *)(&((struct ib_smp *)out_mad)->data));
1022 mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
1024 (u8 *)(&((struct ib_smp *)out_mad)->data));
1033 void handle_port_mgmt_change_event(struct work_struct *work)
1035 struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
1036 struct mlx4_ib_dev *dev = ew->ib_dev;
1037 struct mlx4_eqe *eqe = &(ew->ib_eqe);
1038 u8 port = eqe->event.port_mgmt_change.port;
1043 switch (eqe->subtype) {
1044 case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
1045 changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
		/* Update the SM ah - this should be done before handling
		 * the other changed attributes, so that MADs can be sent to the SM.
		 */
1049 if (changed_attr & MSTR_SM_CHANGE_MASK) {
1050 u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
1051 u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
1052 update_sm_ah(dev, port, lid, sl);
1055 /* Check if it is a lid change event */
1056 if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
1057 handle_lid_change_event(dev, port);
1059 /* Generate GUID changed event */
1060 if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
1061 mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1062 /*if master, notify all slaves*/
1063 if (mlx4_is_master(dev->dev))
1064 mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
1065 MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
1068 if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
1069 handle_client_rereg_event(dev, port);
1072 case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
1073 mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
1074 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
1075 propagate_pkey_ev(dev, port, eqe);
1077 case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
1078 /* paravirtualized master's guid is guid 0 -- does not change */
1079 if (!mlx4_is_master(dev->dev))
1080 mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1081 /*if master, notify relevant slaves*/
1082 else if (!dev->sriov.is_going_down) {
1083 tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
1084 change_bitmap = GET_MASK_FROM_EQE(eqe);
1085 handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
1089 pr_warn("Unsupported subtype 0x%x for "
1090 "Port Management Change event\n", eqe->subtype);
1096 void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
1097 enum ib_event_type type)
1099 struct ib_event event;
1101 event.device = &dev->ib_dev;
1102 event.element.port_num = port_num;
1105 ib_dispatch_event(&event);
1108 static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
1110 unsigned long flags;
1111 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
1112 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1113 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
1114 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
1115 queue_work(ctx->wq, &ctx->work);
1116 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
1119 static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
1120 struct mlx4_ib_demux_pv_qp *tun_qp,
1123 struct ib_sge sg_list;
1124 struct ib_recv_wr recv_wr, *bad_recv_wr;
1127 size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
1128 sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);
1130 sg_list.addr = tun_qp->ring[index].map;
1131 sg_list.length = size;
1132 sg_list.lkey = ctx->mr->lkey;
1134 recv_wr.next = NULL;
1135 recv_wr.sg_list = &sg_list;
1136 recv_wr.num_sge = 1;
1137 recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
1138 MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
1139 ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
1140 size, DMA_FROM_DEVICE);
1141 return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
1144 static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
1145 int slave, struct ib_sa_mad *sa_mad)
1149 /* dispatch to different sa handlers */
1150 switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
1151 case IB_SA_ATTR_MC_MEMBER_REC:
1152 ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
1160 static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
1162 int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
1164 return (qpn >= proxy_start && qpn <= proxy_start + 1);
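
/*
 * Send a MAD on behalf of a slave out to the wire, using the master's
 * real SMI/GSI QP for the port and the supplied address handle
 * attributes.
 */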
1168 int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
1169 enum ib_qp_type dest_qpt, u16 pkey_index,
1170 u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
1171 u8 *s_mac, struct ib_mad *mad)
1174 struct ib_send_wr wr, *bad_wr;
1175 struct mlx4_ib_demux_pv_ctx *sqp_ctx;
1176 struct mlx4_ib_demux_pv_qp *sqp;
1177 struct mlx4_mad_snd_buf *sqp_mad;
1179 struct ib_qp *send_qp = NULL;
1180 unsigned wire_tx_ix = 0;
1187 sqp_ctx = dev->sriov.sqps[port-1];
1189 /* check if proxy qp created */
1190 if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
1193 if (dest_qpt == IB_QPT_SMI) {
1195 sqp = &sqp_ctx->qp[0];
1196 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
1199 sqp = &sqp_ctx->qp[1];
1200 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
1206 sgid_index = attr->grh.sgid_index;
1207 attr->grh.sgid_index = 0;
1208 ah = ib_create_ah(sqp_ctx->pd, attr);
1211 attr->grh.sgid_index = sgid_index;
1212 to_mah(ah)->av.ib.gid_index = sgid_index;
1213 /* get rid of force-loopback bit */
1214 to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
1215 spin_lock(&sqp->tx_lock);
1216 if (sqp->tx_ix_head - sqp->tx_ix_tail >=
1217 (MLX4_NUM_TUNNEL_BUFS - 1))
1220 wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
1221 spin_unlock(&sqp->tx_lock);
1225 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
1226 if (sqp->tx_ring[wire_tx_ix].ah)
1227 ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
1228 sqp->tx_ring[wire_tx_ix].ah = ah;
1229 ib_dma_sync_single_for_cpu(&dev->ib_dev,
1230 sqp->tx_ring[wire_tx_ix].buf.map,
1231 sizeof (struct mlx4_mad_snd_buf),
1234 memcpy(&sqp_mad->payload, mad, sizeof *mad);
1236 ib_dma_sync_single_for_device(&dev->ib_dev,
1237 sqp->tx_ring[wire_tx_ix].buf.map,
1238 sizeof (struct mlx4_mad_snd_buf),
1241 list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
1242 list.length = sizeof (struct mlx4_mad_snd_buf);
1243 list.lkey = sqp_ctx->mr->lkey;
1246 wr.wr.ud.port_num = port;
1247 wr.wr.ud.pkey_index = wire_pkey_ix;
1248 wr.wr.ud.remote_qkey = qkey;
1249 wr.wr.ud.remote_qpn = remote_qpn;
1251 wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
1254 wr.opcode = IB_WR_SEND;
1255 wr.send_flags = IB_SEND_SIGNALED;
1257 memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
1260 ret = ib_post_send(send_qp, &wr, &bad_wr);
1267 static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
1269 if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1271 return mlx4_get_base_gid_ix(dev->dev, slave, port);
1274 static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
1275 struct ib_ah_attr *ah_attr)
1277 if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1278 ah_attr->grh.sgid_index = slave;
1280 ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
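
/*
 * Handle a MAD tunnelled up from a slave: validate the source proxy QP,
 * encode the slave in the transaction ID, apply class-specific checks,
 * rebuild a real address handle from the tunnel header and send the MAD
 * to the wire.
 */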
1283 static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
1285 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1286 struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
1287 int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
1288 struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
1289 struct mlx4_ib_ah ah;
1290 struct ib_ah_attr ah_attr;
1295 /* Get slave that sent this packet */
1296 if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
1297 wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
1298 (wc->src_qp & 0x1) != ctx->port - 1 ||
1300 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
1303 slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
1304 if (slave != ctx->slave) {
1305 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
1306 "belongs to another slave\n", wc->src_qp);
1310 /* Map transaction ID */
1311 ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
1312 sizeof (struct mlx4_tunnel_mad),
1314 switch (tunnel->mad.mad_hdr.method) {
1315 case IB_MGMT_METHOD_SET:
1316 case IB_MGMT_METHOD_GET:
1317 case IB_MGMT_METHOD_REPORT:
1318 case IB_SA_METHOD_GET_TABLE:
1319 case IB_SA_METHOD_DELETE:
1320 case IB_SA_METHOD_GET_MULTI:
1321 case IB_SA_METHOD_GET_TRACE_TBL:
1322 slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
1324 mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
1325 "class:%d slave:%d\n", *slave_id,
1326 tunnel->mad.mad_hdr.mgmt_class, slave);
1334 /* Class-specific handling */
1335 switch (tunnel->mad.mad_hdr.mgmt_class) {
1336 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
1337 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
1338 if (slave != mlx4_master_func_num(dev->dev) &&
1339 !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
1342 case IB_MGMT_CLASS_SUBN_ADM:
1343 if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
1344 (struct ib_sa_mad *) &tunnel->mad))
1347 case IB_MGMT_CLASS_CM:
1348 if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
1349 (struct ib_mad *) &tunnel->mad))
1352 case IB_MGMT_CLASS_DEVICE_MGMT:
1353 if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
1354 tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
1358 /* Drop unsupported classes for slaves in tunnel mode */
1359 if (slave != mlx4_master_func_num(dev->dev)) {
1360 mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
1361 "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
	/* We are using standard ib_core services to send the mad, so generate a
	 * standard address handle by decoding the tunnelled mlx4_ah fields */
1368 memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
1369 ah.ibah.device = ctx->ib_dev;
1371 port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
1372 port = mlx4_slave_convert_port(dev->dev, slave, port);
1375 ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
1377 mlx4_ib_query_ah(&ah.ibah, &ah_attr);
1378 if (ah_attr.ah_flags & IB_AH_GRH)
1379 fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
1381 memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
1382 ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
	/* if the slave has a default vlan, use it */
1384 mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
1385 &ah_attr.vlan_id, &ah_attr.sl);
1387 mlx4_ib_send_to_wire(dev, slave, ctx->port,
1388 is_proxy_qp0(dev, wc->src_qp, slave) ?
1389 IB_QPT_SMI : IB_QPT_GSI,
1390 be16_to_cpu(tunnel->hdr.pkey_index),
1391 be32_to_cpu(tunnel->hdr.remote_qpn),
1392 be32_to_cpu(tunnel->hdr.qkey),
1393 &ah_attr, wc->smac, &tunnel->mad);
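
/*
 * Allocate and DMA-map the receive and transmit buffer rings for one
 * para-virtualized (tunnel or special) QP.
 */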
1396 static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1397 enum ib_qp_type qp_type, int is_tun)
1400 struct mlx4_ib_demux_pv_qp *tun_qp;
1401 int rx_buf_size, tx_buf_size;
1403 if (qp_type > IB_QPT_GSI)
1406 tun_qp = &ctx->qp[qp_type];
1408 tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
1413 tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
1414 sizeof (struct mlx4_ib_tun_tx_buf),
1416 if (!tun_qp->tx_ring) {
1417 kfree(tun_qp->ring);
1418 tun_qp->ring = NULL;
1423 rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1424 tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1426 rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1427 tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1430 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1431 tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
1432 if (!tun_qp->ring[i].addr)
1434 tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
1435 tun_qp->ring[i].addr,
1438 if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
1439 kfree(tun_qp->ring[i].addr);
1444 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1445 tun_qp->tx_ring[i].buf.addr =
1446 kmalloc(tx_buf_size, GFP_KERNEL);
1447 if (!tun_qp->tx_ring[i].buf.addr)
1449 tun_qp->tx_ring[i].buf.map =
1450 ib_dma_map_single(ctx->ib_dev,
1451 tun_qp->tx_ring[i].buf.addr,
1454 if (ib_dma_mapping_error(ctx->ib_dev,
1455 tun_qp->tx_ring[i].buf.map)) {
1456 kfree(tun_qp->tx_ring[i].buf.addr);
1459 tun_qp->tx_ring[i].ah = NULL;
1461 spin_lock_init(&tun_qp->tx_lock);
1462 tun_qp->tx_ix_head = 0;
1463 tun_qp->tx_ix_tail = 0;
1464 tun_qp->proxy_qpt = qp_type;
1471 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1472 tx_buf_size, DMA_TO_DEVICE);
1473 kfree(tun_qp->tx_ring[i].buf.addr);
1475 kfree(tun_qp->tx_ring);
1476 tun_qp->tx_ring = NULL;
1477 i = MLX4_NUM_TUNNEL_BUFS;
1481 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1482 rx_buf_size, DMA_FROM_DEVICE);
1483 kfree(tun_qp->ring[i].addr);
1485 kfree(tun_qp->ring);
1486 tun_qp->ring = NULL;
1490 static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1491 enum ib_qp_type qp_type, int is_tun)
1494 struct mlx4_ib_demux_pv_qp *tun_qp;
1495 int rx_buf_size, tx_buf_size;
1497 if (qp_type > IB_QPT_GSI)
1500 tun_qp = &ctx->qp[qp_type];
1502 rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1503 tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1505 rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1506 tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1510 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1511 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1512 rx_buf_size, DMA_FROM_DEVICE);
1513 kfree(tun_qp->ring[i].addr);
1516 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1517 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1518 tx_buf_size, DMA_TO_DEVICE);
1519 kfree(tun_qp->tx_ring[i].buf.addr);
1520 if (tun_qp->tx_ring[i].ah)
1521 ib_destroy_ah(tun_qp->tx_ring[i].ah);
1523 kfree(tun_qp->tx_ring);
1524 kfree(tun_qp->ring);
1527 static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
1529 struct mlx4_ib_demux_pv_ctx *ctx;
1530 struct mlx4_ib_demux_pv_qp *tun_qp;
1533 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1534 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1536 while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1537 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1538 if (wc.status == IB_WC_SUCCESS) {
1539 switch (wc.opcode) {
1541 mlx4_ib_multiplex_mad(ctx, &wc);
1542 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
1544 (MLX4_NUM_TUNNEL_BUFS - 1));
1546 pr_err("Failed reposting tunnel "
1547 "buf:%lld\n", wc.wr_id);
1550 pr_debug("received tunnel send completion:"
1551 "wrid=0x%llx, status=0x%x\n",
1552 wc.wr_id, wc.status);
1553 ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1554 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1555 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1557 spin_lock(&tun_qp->tx_lock);
1558 tun_qp->tx_ix_tail++;
1559 spin_unlock(&tun_qp->tx_lock);
1566 pr_debug("mlx4_ib: completion error in tunnel: %d."
1567 " status = %d, wrid = 0x%llx\n",
1568 ctx->slave, wc.status, wc.wr_id);
1569 if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1570 ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1571 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1572 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1574 spin_lock(&tun_qp->tx_lock);
1575 tun_qp->tx_ix_tail++;
1576 spin_unlock(&tun_qp->tx_lock);
1582 static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
1584 struct mlx4_ib_demux_pv_ctx *sqp = qp_context;
1586 /* It's worse than that! He's dead, Jim! */
1587 pr_err("Fatal error (%d) on a MAD QP on port %d\n",
1588 event->event, sqp->port);
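
/*
 * Create one para-virtual QP (a tunnel QP for a slave, or a real SMI/GSI
 * QP for the master), move it to RTS and post its receive buffers.
 */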
1591 static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
1592 enum ib_qp_type qp_type, int create_tun)
1595 struct mlx4_ib_demux_pv_qp *tun_qp;
1596 struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
1597 struct ib_qp_attr attr;
1598 int qp_attr_mask_INIT;
1600 if (qp_type > IB_QPT_GSI)
1603 tun_qp = &ctx->qp[qp_type];
1605 memset(&qp_init_attr, 0, sizeof qp_init_attr);
1606 qp_init_attr.init_attr.send_cq = ctx->cq;
1607 qp_init_attr.init_attr.recv_cq = ctx->cq;
1608 qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
1609 qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
1610 qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
1611 qp_init_attr.init_attr.cap.max_send_sge = 1;
1612 qp_init_attr.init_attr.cap.max_recv_sge = 1;
1614 qp_init_attr.init_attr.qp_type = IB_QPT_UD;
1615 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
1616 qp_init_attr.port = ctx->port;
1617 qp_init_attr.slave = ctx->slave;
1618 qp_init_attr.proxy_qp_type = qp_type;
1619 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
1620 IB_QP_QKEY | IB_QP_PORT;
1622 qp_init_attr.init_attr.qp_type = qp_type;
1623 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
1624 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
1626 qp_init_attr.init_attr.port_num = ctx->port;
1627 qp_init_attr.init_attr.qp_context = ctx;
1628 qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
1629 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
1630 if (IS_ERR(tun_qp->qp)) {
1631 ret = PTR_ERR(tun_qp->qp);
1633 pr_err("Couldn't create %s QP (%d)\n",
1634 create_tun ? "tunnel" : "special", ret);
1638 memset(&attr, 0, sizeof attr);
1639 attr.qp_state = IB_QPS_INIT;
1642 ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
1643 ctx->port, IB_DEFAULT_PKEY_FULL,
1645 if (ret || !create_tun)
1647 to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
1648 attr.qkey = IB_QP1_QKEY;
1649 attr.port_num = ctx->port;
1650 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
1652 pr_err("Couldn't change %s qp state to INIT (%d)\n",
1653 create_tun ? "tunnel" : "special", ret);
1656 attr.qp_state = IB_QPS_RTR;
1657 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
1659 pr_err("Couldn't change %s qp state to RTR (%d)\n",
1660 create_tun ? "tunnel" : "special", ret);
1663 attr.qp_state = IB_QPS_RTS;
1665 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
1667 pr_err("Couldn't change %s qp state to RTS (%d)\n",
1668 create_tun ? "tunnel" : "special", ret);
1672 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1673 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
1675 pr_err(" mlx4_ib_post_pv_buf error"
1676 " (err = %d, i = %d)\n", ret, i);
1683 ib_destroy_qp(tun_qp->qp);
/* IB MAD completion callback for real SQPs */
1691 static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1693 struct mlx4_ib_demux_pv_ctx *ctx;
1694 struct mlx4_ib_demux_pv_qp *sqp;
1699 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1700 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1702 while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1703 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1704 if (wc.status == IB_WC_SUCCESS) {
1705 switch (wc.opcode) {
1707 ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1708 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1709 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1711 spin_lock(&sqp->tx_lock);
1713 spin_unlock(&sqp->tx_lock);
1716 mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
1717 (sqp->ring[wc.wr_id &
1718 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
1719 grh = &(((struct mlx4_mad_rcv_buf *)
1720 (sqp->ring[wc.wr_id &
1721 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
1722 mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
1723 if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
1724 (MLX4_NUM_TUNNEL_BUFS - 1)))
1725 pr_err("Failed reposting SQP "
1726 "buf:%lld\n", wc.wr_id);
1733 pr_debug("mlx4_ib: completion error in tunnel: %d."
1734 " status = %d, wrid = 0x%llx\n",
1735 ctx->slave, wc.status, wc.wr_id);
1736 if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1737 ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1738 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1739 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1741 spin_lock(&sqp->tx_lock);
1743 spin_unlock(&sqp->tx_lock);
1749 static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
1750 struct mlx4_ib_demux_pv_ctx **ret_ctx)
1752 struct mlx4_ib_demux_pv_ctx *ctx;
1755 ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
1757 pr_err("failed allocating pv resource context "
1758 "for port %d, slave %d\n", port, slave);
1762 ctx->ib_dev = &dev->ib_dev;
1769 static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
1771 if (dev->sriov.demux[port - 1].tun[slave]) {
1772 kfree(dev->sriov.demux[port - 1].tun[slave]);
1773 dev->sriov.demux[port - 1].tun[slave] = NULL;
1777 static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
1778 int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
1782 if (ctx->state != DEMUX_PV_STATE_DOWN)
1785 ctx->state = DEMUX_PV_STATE_STARTING;
1786 /* have QP0 only if link layer is IB */
1787 if (rdma_port_get_link_layer(ibdev, ctx->port) ==
1788 IB_LINK_LAYER_INFINIBAND)
1792 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
1794 pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
1799 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
1801 pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
1805 cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
1809 ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
1810 NULL, ctx, cq_size, 0);
1811 if (IS_ERR(ctx->cq)) {
1812 ret = PTR_ERR(ctx->cq);
1813 pr_err("Couldn't create tunnel CQ (%d)\n", ret);
1817 ctx->pd = ib_alloc_pd(ctx->ib_dev);
1818 if (IS_ERR(ctx->pd)) {
1819 ret = PTR_ERR(ctx->pd);
1820 pr_err("Couldn't create tunnel PD (%d)\n", ret);
1824 ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
1825 if (IS_ERR(ctx->mr)) {
1826 ret = PTR_ERR(ctx->mr);
1827 pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
1832 ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
1834 pr_err("Couldn't create %s QP0 (%d)\n",
1835 create_tun ? "tunnel for" : "", ret);
1840 ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
1842 pr_err("Couldn't create %s QP1 (%d)\n",
1843 create_tun ? "tunnel for" : "", ret);
1848 INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
1850 INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
1852 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
1854 ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1856 pr_err("Couldn't arm tunnel cq (%d)\n", ret);
1859 ctx->state = DEMUX_PV_STATE_ACTIVE;
1864 ib_destroy_qp(ctx->qp[1].qp);
1865 ctx->qp[1].qp = NULL;
1870 ib_destroy_qp(ctx->qp[0].qp);
1871 ctx->qp[0].qp = NULL;
1874 ib_dereg_mr(ctx->mr);
1878 ib_dealloc_pd(ctx->pd);
1882 ib_destroy_cq(ctx->cq);
1886 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);
1890 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
1892 ctx->state = DEMUX_PV_STATE_DOWN;
1896 static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
1897 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
1901 if (ctx->state > DEMUX_PV_STATE_DOWN) {
1902 ctx->state = DEMUX_PV_STATE_DOWNING;
1904 flush_workqueue(ctx->wq);
1906 ib_destroy_qp(ctx->qp[0].qp);
1907 ctx->qp[0].qp = NULL;
1908 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
1910 ib_destroy_qp(ctx->qp[1].qp);
1911 ctx->qp[1].qp = NULL;
1912 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
1913 ib_dereg_mr(ctx->mr);
1915 ib_dealloc_pd(ctx->pd);
1917 ib_destroy_cq(ctx->cq);
1919 ctx->state = DEMUX_PV_STATE_DOWN;
1923 static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
1924 int port, int do_init)
1929 clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
1930 /* for master, destroy real sqp resources */
1931 if (slave == mlx4_master_func_num(dev->dev))
1932 destroy_pv_resources(dev, slave, port,
1933 dev->sriov.sqps[port - 1], 1);
1934 /* destroy the tunnel qp resources */
1935 destroy_pv_resources(dev, slave, port,
1936 dev->sriov.demux[port - 1].tun[slave], 1);
1940 /* create the tunnel qp resources */
1941 ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
1942 dev->sriov.demux[port - 1].tun[slave]);
1944 /* for master, create the real sqp resources */
1945 if (!ret && slave == mlx4_master_func_num(dev->dev))
1946 ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
1947 dev->sriov.sqps[port - 1]);
1951 void mlx4_ib_tunnels_update_work(struct work_struct *work)
1953 struct mlx4_ib_demux_work *dmxw;
1955 dmxw = container_of(work, struct mlx4_ib_demux_work, work);
1956 mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
1962 static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
1963 struct mlx4_ib_demux_ctx *ctx,
1970 ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
1971 sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
1977 ctx->ib_dev = &dev->ib_dev;
1980 i < min(dev->dev->caps.sqp_demux,
1981 (u16)(dev->dev->persist->num_vfs + 1));
1983 struct mlx4_active_ports actv_ports =
1984 mlx4_get_active_ports(dev->dev, i);
1986 if (!test_bit(port - 1, actv_ports.ports))
1989 ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
1996 ret = mlx4_ib_mcg_port_init(ctx);
1998 pr_err("Failed initializing mcg para-virt (%d)\n", ret);
2002 snprintf(name, sizeof name, "mlx4_ibt%d", port);
2003 ctx->wq = create_singlethread_workqueue(name);
2005 pr_err("Failed to create tunnelling WQ for port %d\n", port);
2010 snprintf(name, sizeof name, "mlx4_ibud%d", port);
2011 ctx->ud_wq = create_singlethread_workqueue(name);
2013 pr_err("Failed to create up/down WQ for port %d\n", port);
2021 destroy_workqueue(ctx->wq);
2025 mlx4_ib_mcg_port_cleanup(ctx, 1);
2027 for (i = 0; i < dev->dev->caps.sqp_demux; i++)
2028 free_pv_object(dev, i, port);
2034 static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
2036 if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
2037 sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
2038 flush_workqueue(sqp_ctx->wq);
2039 if (sqp_ctx->has_smi) {
2040 ib_destroy_qp(sqp_ctx->qp[0].qp);
2041 sqp_ctx->qp[0].qp = NULL;
2042 mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
2044 ib_destroy_qp(sqp_ctx->qp[1].qp);
2045 sqp_ctx->qp[1].qp = NULL;
2046 mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
2047 ib_dereg_mr(sqp_ctx->mr);
2049 ib_dealloc_pd(sqp_ctx->pd);
2051 ib_destroy_cq(sqp_ctx->cq);
2053 sqp_ctx->state = DEMUX_PV_STATE_DOWN;
2057 static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
2061 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
2062 mlx4_ib_mcg_port_cleanup(ctx, 1);
2063 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2066 if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
2067 ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
2069 flush_workqueue(ctx->wq);
2070 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2071 destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
2072 free_pv_object(dev, i, ctx->port);
2075 destroy_workqueue(ctx->ud_wq);
2076 destroy_workqueue(ctx->wq);
2080 static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
2084 if (!mlx4_is_master(dev->dev))
2086 /* initialize or tear down tunnel QPs for the master */
2087 for (i = 0; i < dev->dev->caps.num_ports; i++)
2088 mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
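
/*
 * Set up SR-IOV para-virtualization: CM and alias-GUID services, sysfs
 * entries and, on the master, a demux context and tunnel QPs per port.
 */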
2092 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
2097 if (!mlx4_is_mfunc(dev->dev))
2100 dev->sriov.is_going_down = 0;
2101 spin_lock_init(&dev->sriov.going_down_lock);
2102 mlx4_ib_cm_paravirt_init(dev);
2104 mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");
2106 if (mlx4_is_slave(dev->dev)) {
2107 mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
2111 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2112 if (i == mlx4_master_func_num(dev->dev))
2113 mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
2115 mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
2118 err = mlx4_ib_init_alias_guid_service(dev);
2120 mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
2123 err = mlx4_ib_device_register_sysfs(dev);
2125 mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
2129 mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
2130 dev->dev->caps.sqp_demux);
2131 for (i = 0; i < dev->num_ports; i++) {
2133 err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
2136 dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
2137 err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
2138 &dev->sriov.sqps[i]);
2141 err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
2145 mlx4_ib_master_tunnels(dev, 1);
2149 free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2152 free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2153 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2155 mlx4_ib_device_unregister_sysfs(dev);
2158 mlx4_ib_destroy_alias_guid_service(dev);
2161 mlx4_ib_cm_paravirt_clean(dev, -1);
2166 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
2169 unsigned long flags;
2171 if (!mlx4_is_mfunc(dev->dev))
2174 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
2175 dev->sriov.is_going_down = 1;
2176 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
2177 if (mlx4_is_master(dev->dev)) {
2178 for (i = 0; i < dev->num_ports; i++) {
2179 flush_workqueue(dev->sriov.demux[i].ud_wq);
2180 mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
2181 kfree(dev->sriov.sqps[i]);
2182 dev->sriov.sqps[i] = NULL;
2183 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2186 mlx4_ib_cm_paravirt_clean(dev, -1);
2187 mlx4_ib_destroy_alias_guid_service(dev);
2188 mlx4_ib_device_unregister_sysfs(dev);