/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *         Redistribution and use in source and binary forms, with or
 *         without modification, are permitted provided that the following
 *         conditions are met:
 *
 *              - Redistributions of source code must retain the above
 *                copyright notice, this list of conditions and the following
 *                disclaimer.
 *
 *              - Redistributions in binary form must reproduce the above
 *                copyright notice, this list of conditions and the following
 *                disclaimer in the documentation and/or other materials
 *                provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

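	/* QP0 (SMI) and QP1 (GSI) are per-port singletons, so reject a
	 * second QP of either type on the same port
	 */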
	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= 1;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->pelem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num		= 0;
		port->qp_smi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
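	/* worked example (hash value made up): a QPN whose 14-bit hash
	 * is 0x1234 gets src_port = 0xc000 + 0x1234 = 0xd234; any hash
	 * value lands inside 0xc000-0xffff
	 */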
	qp->src_port = RXE_ROCE_V2_SPORT +
		(hash_32_generic(qp_num(qp), 14) & 0x3fff);
	qp->sq.max_wr		= init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);
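	/* the SGE array and inline data share the same space in the send
	 * WQE, so wqe_size covers whichever of the two is larger plus the
	 * fixed WQE header; the recomputed limits were reported back
	 * through init->cap above
	 */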

	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index	= producer_index(qp->sq.queue);
	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

/* called by the modify qp verb; this routine checks all the parameters
 * before making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
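		/* timeout values are 5-bit fields in the IBA, hence the
		 * 31 limit here and below
		 */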
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves, drain work and packet
	 * queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
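		/* e.g. a requested depth of 5 is rounded up to 8 */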

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		rxe_init_av(&attr->ah_attr, &qp->pri_av);
	}

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
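			/* e.g. attr->timeout == 14 gives
			 * 4096ns << 14 == ~67.1ms
			 */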
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
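		/* PSNs are 24 bits on the wire; BTH_PSN_MASK drops any
		 * high-order bits
		 */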
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state typically
		 * spin on it; yield the processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

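	/* execute_in_process_context() runs the cleanup immediately when
	 * called from process context and defers it to a work queue
	 * otherwise, since the last reference may be dropped from a
	 * context that cannot sleep
	 */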
	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}