/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM                         (2 * HNS_ROCE_MAX_PORTS)

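/*
 * Dispatch an asynchronous event for the given QPN: look the QP up in the
 * radix tree under the qp_table lock, take a reference so it cannot be freed
 * while the handler runs, invoke its event callback, then drop the reference.
 */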
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = hr_dev->dev;
        struct hns_roce_qp *qp;

        spin_lock(&qp_table->lock);

        qp = __hns_roce_qp_lookup(hr_dev, qpn);
        if (qp)
                atomic_inc(&qp->refcount);

        spin_unlock(&qp_table->lock);

        if (!qp) {
                dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        qp->event(qp, (enum hns_roce_event)event_type);

        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_event);

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
                                 enum hns_roce_event type)
{
        struct ib_event event;
        struct ib_qp *ibqp = &hr_qp->ibqp;

        if (ibqp->event_handler) {
                event.device = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case HNS_ROCE_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
                                type, hr_qp->qpn);
                        return;
                }
                ibqp->event_handler(&event, ibqp->qp_context);
        }
}

static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
                                     int align, unsigned long *base)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:
                return HNS_ROCE_QP_STATE_RST;
        case IB_QPS_INIT:
                return HNS_ROCE_QP_STATE_INIT;
        case IB_QPS_RTR:
                return HNS_ROCE_QP_STATE_RTR;
        case IB_QPS_RTS:
                return HNS_ROCE_QP_STATE_RTS;
        case IB_QPS_SQD:
                return HNS_ROCE_QP_STATE_SQD;
        case IB_QPS_ERR:
                return HNS_ROCE_QP_STATE_ERR;
        default:
                return HNS_ROCE_QP_NUM_STATE;
        }
}
EXPORT_SYMBOL_GPL(to_hns_roce_state);

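/*
 * Register a GSI QP under a fixed QPN.  Only the radix-tree entry and the
 * reference counting are set up here; no QPC/IRRL context memory is
 * reserved, since the hw_v1 engine keeps the GSI QP context in its own
 * registers.
 */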
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                                 struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        int ret;

        if (!qpn)
                return -EINVAL;

        hr_qp->qpn = qpn;

        spin_lock_irq(&qp_table->lock);
        ret = radix_tree_insert(&hr_dev->qp_table_tree,
                                hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
        spin_unlock_irq(&qp_table->lock);
        if (ret) {
                dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
                goto err_put_irrl;
        }

        atomic_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        return 0;

err_put_irrl:

        return ret;
}

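/*
 * Reserve the per-QP hardware context for a regular QP: QPC and IRRL entries
 * (plus a TRRL entry when the device provides one), then publish the QP in
 * the radix tree so asynchronous events can find it.
 */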
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                             struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = hr_dev->dev;
        int ret;

        if (!qpn)
                return -EINVAL;

        hr_qp->qpn = qpn;

        /* Alloc memory for QPC */
        ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "QPC table get failed\n");
                goto err_out;
        }

        /* Alloc memory for IRRL */
        ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "IRRL table get failed\n");
                goto err_put_qp;
        }

        if (hr_dev->caps.trrl_entry_sz) {
                /* Alloc memory for TRRL */
                ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
                                         hr_qp->qpn);
                if (ret) {
                        dev_err(dev, "TRRL table get failed\n");
                        goto err_put_irrl;
                }
        }

        spin_lock_irq(&qp_table->lock);
        ret = radix_tree_insert(&hr_dev->qp_table_tree,
                                hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
        spin_unlock_irq(&qp_table->lock);
        if (ret) {
                dev_err(dev, "QPC radix_tree_insert failed\n");
                goto err_put_trrl;
        }

        atomic_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        return 0;

err_put_trrl:
        if (hr_dev->caps.trrl_entry_sz)
                hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
        hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
        hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
        return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        unsigned long flags;

        spin_lock_irqsave(&qp_table->lock, flags);
        radix_tree_delete(&hr_dev->qp_table_tree,
                          hr_qp->qpn & (hr_dev->caps.num_qps - 1));
        spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_remove);

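/*
 * Drop the initial reference taken at allocation time, wait until all event
 * handlers have released theirs, and then return the QPC/IRRL/TRRL context
 * entries for non-GSI QPs.
 */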
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (atomic_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
        wait_for_completion(&hr_qp->free);

        if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
                if (hr_dev->caps.trrl_entry_sz)
                        hns_roce_table_put(hr_dev, &qp_table->trrl_table,
                                           hr_qp->qpn);
                hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
                hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        }
}
EXPORT_SYMBOL_GPL(hns_roce_qp_free);

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
                               int cnt)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (base_qpn < SQP_NUM)
                return;

        hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);

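/*
 * Validate the requested receive-queue attributes against the device caps
 * and derive the RQ geometry (WQE count, max SGEs and WQE size shift).  When
 * the QP is attached to an SRQ, the receive queue is left empty.
 */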
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
                                struct ib_qp_cap *cap, int is_user, int has_srq,
                                struct hns_roce_qp *hr_qp)
{
        struct device *dev = hr_dev->dev;
        u32 max_cnt;

        /* Check the validity of QP support capacity */
        if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
            cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
                dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
                        cap->max_recv_wr, cap->max_recv_sge);
                return -EINVAL;
        }

        /* If an SRQ exists, set the RQ sizes to zero */
        if (has_srq) {
                if (cap->max_recv_wr) {
                        dev_dbg(dev, "srq no need config max_recv_wr\n");
                        return -EINVAL;
                }

                hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
        } else {
                if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
                        dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
                        return -EINVAL;
                }

                if (hr_dev->caps.min_wqes)
                        max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
                else
                        max_cnt = cap->max_recv_wr;

                hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

                if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
                        dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
                        return -EINVAL;
                }

                max_cnt = max(1U, cap->max_recv_sge);
                hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
                if (hr_dev->caps.max_rq_sg <= 2)
                        hr_qp->rq.wqe_shift =
                                        ilog2(hr_dev->caps.max_rq_desc_sz);
                else
                        hr_qp->rq.wqe_shift =
                                        ilog2(hr_dev->caps.max_rq_desc_sz
                                              * hr_qp->rq.max_gs);
        }

        cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
        cap->max_recv_sge = hr_qp->rq.max_gs;

        return 0;
}

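/*
 * Derive the send-queue geometry of a userspace QP from the sizes passed in
 * the create-QP command, sanity-check them against the device caps, and lay
 * out the SQ, extended SGE area and RQ inside the user buffer.
 */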
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
                                     struct ib_qp_cap *cap,
                                     struct hns_roce_qp *hr_qp,
                                     struct hns_roce_ib_create_qp *ucmd)
{
        u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
        u8 max_sq_stride = ilog2(roundup_sq_stride);
        u32 page_size;
        u32 max_cnt;

        /* Sanity check SQ size before proceeding */
        if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
             ucmd->log_sq_stride > max_sq_stride ||
             ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
                dev_err(hr_dev->dev, "check SQ size error!\n");
                return -EINVAL;
        }

        if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
                dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
                        cap->max_send_sge);
                return -EINVAL;
        }

        hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
        hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

        max_cnt = max(1U, cap->max_send_sge);
        if (hr_dev->caps.max_sq_sg <= 2)
                hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
        else
                hr_qp->sq.max_gs = max_cnt;

        if (hr_qp->sq.max_gs > 2)
                hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                                                        (hr_qp->sq.max_gs - 2));
        hr_qp->sge.sge_shift = 4;

        /* Get buf size; SQ and RQ are aligned to the page size */
        if (hr_dev->caps.max_sq_sg <= 2) {
                hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
                                             hr_qp->rq.wqe_shift), PAGE_SIZE) +
                                   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                             hr_qp->sq.wqe_shift), PAGE_SIZE);

                hr_qp->sq.offset = 0;
                hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                             hr_qp->sq.wqe_shift), PAGE_SIZE);
        } else {
                page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
                hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
                                             hr_qp->rq.wqe_shift), page_size) +
                                   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
                                             hr_qp->sge.sge_shift), page_size) +
                                   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                             hr_qp->sq.wqe_shift), page_size);

                hr_qp->sq.offset = 0;
                if (hr_qp->sge.sge_cnt) {
                        hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
                                                        (hr_qp->sq.wqe_cnt <<
                                                        hr_qp->sq.wqe_shift),
                                                        page_size);
                        hr_qp->rq.offset = hr_qp->sge.offset +
                                        HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
                                                hr_qp->sge.sge_shift),
                                                page_size);
                } else {
                        hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
                                                        (hr_qp->sq.wqe_cnt <<
                                                        hr_qp->sq.wqe_shift),
                                                        page_size);
                }
        }

        return 0;
}

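/*
 * Compute the send-queue geometry for a kernel QP from the requested caps,
 * lay out the SQ, extended SGE area and RQ inside the kernel buffer, and
 * report the granted sizes back through @cap.
 */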
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
                                       struct ib_qp_cap *cap,
                                       struct hns_roce_qp *hr_qp)
{
        struct device *dev = hr_dev->dev;
        u32 page_size;
        u32 max_cnt;
        int size;

        if (cap->max_send_wr  > hr_dev->caps.max_wqes  ||
            cap->max_send_sge > hr_dev->caps.max_sq_sg ||
            cap->max_inline_data > hr_dev->caps.max_sq_inline) {
                dev_err(dev, "SQ WR or sge or inline data error!\n");
                return -EINVAL;
        }

        hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
        hr_qp->sq_max_wqes_per_wr = 1;
        hr_qp->sq_spare_wqes = 0;

        if (hr_dev->caps.min_wqes)
                max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
        else
                max_cnt = cap->max_send_wr;

        hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
        if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
                dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
                return -EINVAL;
        }

        /* Get data_seg numbers */
        max_cnt = max(1U, cap->max_send_sge);
        if (hr_dev->caps.max_sq_sg <= 2)
                hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
        else
                hr_qp->sq.max_gs = max_cnt;

        if (hr_qp->sq.max_gs > 2) {
                hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                                     (hr_qp->sq.max_gs - 2));
                hr_qp->sge.sge_shift = 4;
        }

        /* UD SQ WQEs (GSI QPs) keep all of their SGEs in the extended SGE area */
        if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
                hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                                     hr_qp->sq.max_gs);
                hr_qp->sge.sge_shift = 4;
        }

        /* Get buf size; SQ and RQ are aligned to the page size */
        page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
        hr_qp->sq.offset = 0;
        size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
                                 page_size);

        if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
                hr_qp->sge.offset = size;
                size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
                                          hr_qp->sge.sge_shift, page_size);
        }

        hr_qp->rq.offset = size;
        size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
                                  page_size);
        hr_qp->buff_size = size;

        /* Report the granted numbers of send WRs and SGEs */
        cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
        cap->max_send_sge = hr_qp->sq.max_gs;

        /* We don't support inline sends for kernel QPs (yet) */
        cap->max_inline_data = 0;

        return 0;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_INI ||
            attr->qp_type == IB_QPT_XRC_TGT || attr->srq)
                return 0;

        return 1;
}

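/*
 * Common path for creating both userspace and kernel QPs: size the work
 * queues, set up the WQE buffer (user memory region or kernel allocation)
 * and its MTT, map or allocate the record doorbell when supported, reserve
 * a QPN unless a special QPN was supplied, and register the QP context.
 */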
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                                     struct ib_pd *ib_pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata, unsigned long sqpn,
                                     struct hns_roce_qp *hr_qp)
{
        struct device *dev = hr_dev->dev;
        struct hns_roce_ib_create_qp ucmd;
        struct hns_roce_ib_create_qp_resp resp = {};
        unsigned long qpn = 0;
        int ret = 0;
        u32 page_shift;
        u32 npages;
        int i;

        mutex_init(&hr_qp->mutex);
        spin_lock_init(&hr_qp->sq.lock);
        spin_lock_init(&hr_qp->rq.lock);

        hr_qp->state = IB_QPS_RESET;

        hr_qp->ibqp.qp_type = init_attr->qp_type;

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
        else
                hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

        ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
                                   !!init_attr->srq, hr_qp);
        if (ret) {
                dev_err(dev, "hns_roce_set_rq_size failed\n");
                goto err_out;
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
                /* allocate recv inline buf */
                hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
                                               sizeof(struct hns_roce_rinl_wqe),
                                               GFP_KERNEL);
                if (!hr_qp->rq_inl_buf.wqe_list) {
                        ret = -ENOMEM;
                        goto err_out;
                }

                hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;

                /* First, allocate one contiguous buffer of SGE space */
                hr_qp->rq_inl_buf.wqe_list[0].sg_list =
                                        kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
                                               init_attr->cap.max_recv_sge *
                                               sizeof(struct hns_roce_rinl_sge),
                                               GFP_KERNEL);
                if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
                        ret = -ENOMEM;
                        goto err_wqe_list;
                }

                for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
                        /* Then point each WQE's sg_list into that buffer */
                        hr_qp->rq_inl_buf.wqe_list[i].sg_list =
                                &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
                                init_attr->cap.max_recv_sge];
        }

        if (ib_pd->uobject) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        dev_err(dev, "ib_copy_from_udata error for create qp\n");
                        ret = -EFAULT;
                        goto err_rq_sge_list;
                }

                ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
                                                &ucmd);
                if (ret) {
                        dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
                        goto err_rq_sge_list;
                }

                hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
                                          ucmd.buf_addr, hr_qp->buff_size, 0,
                                          0);
                if (IS_ERR(hr_qp->umem)) {
                        dev_err(dev, "ib_umem_get error for create qp\n");
                        ret = PTR_ERR(hr_qp->umem);
                        goto err_rq_sge_list;
                }

                hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
                if (hr_dev->caps.mtt_buf_pg_sz) {
                        npages = (ib_umem_page_count(hr_qp->umem) +
                                  (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
                                  (1 << hr_dev->caps.mtt_buf_pg_sz);
                        page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
                        ret = hns_roce_mtt_init(hr_dev, npages,
                                    page_shift,
                                    &hr_qp->mtt);
                } else {
                        ret = hns_roce_mtt_init(hr_dev,
                                    ib_umem_page_count(hr_qp->umem),
                                    hr_qp->umem->page_shift,
                                    &hr_qp->mtt);
                }
                if (ret) {
                        dev_err(dev, "hns_roce_mtt_init error for create qp\n");
                        goto err_buf;
                }

                ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
                                                 hr_qp->umem);
                if (ret) {
                        dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
                        goto err_mtt;
                }

                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                    (udata->outlen >= sizeof(resp)) &&
                    hns_roce_qp_has_rq(init_attr)) {
                        ret = hns_roce_db_map_user(
                                        to_hr_ucontext(ib_pd->uobject->context),
                                        ucmd.db_addr, &hr_qp->rdb);
                        if (ret) {
                                dev_err(dev, "rq record doorbell map failed!\n");
                                goto err_mtt;
                        }
                }
        } else {
                if (init_attr->create_flags &
                    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
                        dev_err(dev, "init_attr->create_flags error!\n");
                        ret = -EINVAL;
                        goto err_rq_sge_list;
                }

                if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
                        dev_err(dev, "init_attr->create_flags error!\n");
                        ret = -EINVAL;
                        goto err_rq_sge_list;
                }

                /* Set SQ size */
                ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
                                                  hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
                        goto err_rq_sge_list;
                }

                /* QP doorbell register address */
                hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;
                hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;

                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                    hns_roce_qp_has_rq(init_attr)) {
                        ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
                        if (ret) {
                                dev_err(dev, "rq record doorbell alloc failed!\n");
                                goto err_rq_sge_list;
                        }
                        *hr_qp->rdb.db_record = 0;
                }

                /* Allocate QP buf */
                page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
                if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
                                       (1 << page_shift) * 2,
                                       &hr_qp->hr_buf, page_shift)) {
                        dev_err(dev, "hns_roce_buf_alloc error!\n");
                        ret = -ENOMEM;
                        goto err_db;
                }

                hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
                /* Write MTT */
                ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
                                        hr_qp->hr_buf.page_shift, &hr_qp->mtt);
                if (ret) {
                        dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
                        goto err_buf;
                }

                ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
                                             &hr_qp->hr_buf);
                if (ret) {
                        dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
                        goto err_mtt;
                }

                hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
                                               GFP_KERNEL);
                hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
                                               GFP_KERNEL);
                if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
                        ret = -ENOMEM;
                        goto err_wrid;
                }
        }

        if (sqpn) {
                qpn = sqpn;
        } else {
                /* Get QPN */
                ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
                if (ret) {
                        dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
                        goto err_wrid;
                }
        }

        if (init_attr->qp_type == IB_QPT_GSI &&
            hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
                /* In the v1 engine, the GSI QP context lives in the RoCE engine's registers */
                ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_qp_alloc failed!\n");
                        goto err_qpn;
                }
        } else {
                ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_qp_alloc failed!\n");
                        goto err_qpn;
                }
        }

        if (sqpn)
                hr_qp->doorbell_qpn = 1;
        else
                hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

        if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
                (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {

                /* Indicate that the kernel supports the record doorbell */
                resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
                ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (ret)
                        goto err_qp;

                hr_qp->rdb_en = 1;
        }
        hr_qp->event = hns_roce_ib_qp_event;

        return 0;

err_qp:
        if (init_attr->qp_type == IB_QPT_GSI &&
                hr_dev->hw_rev == HNS_ROCE_HW_VER1)
                hns_roce_qp_remove(hr_dev, hr_qp);
        else
                hns_roce_qp_free(hr_dev, hr_qp);

err_qpn:
        if (!sqpn)
                hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
        if (ib_pd->uobject) {
                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                    (udata->outlen >= sizeof(resp)) &&
                    hns_roce_qp_has_rq(init_attr))
                        hns_roce_db_unmap_user(
                                        to_hr_ucontext(ib_pd->uobject->context),
                                        &hr_qp->rdb);
        } else {
                kfree(hr_qp->sq.wrid);
                kfree(hr_qp->rq.wrid);
        }

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
        if (ib_pd->uobject)
                ib_umem_release(hr_qp->umem);
        else
                hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_db:
        if (!ib_pd->uobject && hns_roce_qp_has_rq(init_attr) &&
            (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
                hns_roce_free_db(hr_dev, &hr_qp->rdb);

err_rq_sge_list:
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
                kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
                kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
        return ret;
}

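/*
 * ib_device verb: allocate and initialise a QP of the requested type
 * (RC or GSI) via hns_roce_create_qp_common() and return its ib_qp.
 */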
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                                 struct ib_qp_init_attr *init_attr,
                                 struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct device *dev = hr_dev->dev;
        struct hns_roce_sqp *hr_sqp;
        struct hns_roce_qp *hr_qp;
        int ret;

        switch (init_attr->qp_type) {
        case IB_QPT_RC: {
                hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
                if (!hr_qp)
                        return ERR_PTR(-ENOMEM);

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
                                                hr_qp);
                if (ret) {
                        dev_err(dev, "Create RC QP failed\n");
                        kfree(hr_qp);
                        return ERR_PTR(ret);
                }

                hr_qp->ibqp.qp_num = hr_qp->qpn;

                break;
        }
        case IB_QPT_GSI: {
                /* Userspace is not allowed to create special QPs: */
                if (pd->uobject) {
                        dev_err(dev, "not support usr space GSI\n");
                        return ERR_PTR(-EINVAL);
                }

                hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
                if (!hr_sqp)
                        return ERR_PTR(-ENOMEM);

                hr_qp = &hr_sqp->hr_qp;
                hr_qp->port = init_attr->port_num - 1;
                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

                /* When the hw version is v1, the SQPN is allocated per port;
                 * otherwise QP number 1 is used.
                 */
                if (hr_dev->caps.max_sq_sg <= 2)
                        hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
                                             hr_dev->iboe.phy_port[hr_qp->port];
                else
                        hr_qp->ibqp.qp_num = 1;

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
                                                hr_qp->ibqp.qp_num, hr_qp);
                if (ret) {
                        dev_err(dev, "Create GSI QP failed!\n");
                        kfree(hr_sqp);
                        return ERR_PTR(ret);
                }

                break;
        }
        default:{
                dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
                return ERR_PTR(-EINVAL);
        }
        }

        return &hr_qp->ibqp;
}
EXPORT_SYMBOL_GPL(hns_roce_create_qp);

int to_hr_qp_type(int qp_type)
{
        int transport_type;

        if (qp_type == IB_QPT_RC)
                transport_type = SERV_TYPE_RC;
        else if (qp_type == IB_QPT_UC)
                transport_type = SERV_TYPE_UC;
        else if (qp_type == IB_QPT_UD)
                transport_type = SERV_TYPE_UD;
        else if (qp_type == IB_QPT_GSI)
                transport_type = SERV_TYPE_UD;
        else
                transport_type = -1;

        return transport_type;
}
EXPORT_SYMBOL_GPL(to_hr_qp_type);

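/*
 * ib_device verb: validate the requested QP attribute transition (state
 * machine, port, pkey index, path MTU and RDMA atomic limits) and hand the
 * actual context update to the hardware-specific modify_qp callback.
 */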
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                       int attr_mask, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct device *dev = hr_dev->dev;
        int ret = -EINVAL;
        int p;
        enum ib_mtu active_mtu;

        mutex_lock(&hr_qp->mutex);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
        new_state = attr_mask & IB_QP_STATE ?
                    attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
                                IB_LINK_LAYER_ETHERNET)) {
                dev_err(dev, "ib_modify_qp_is_ok failed\n");
                goto out;
        }

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
                dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
                        attr->port_num);
                goto out;
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
                        dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
                                attr->pkey_index);
                        goto out;
                }
        }

        if (attr_mask & IB_QP_PATH_MTU) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

                if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
                    attr->path_mtu > IB_MTU_4096) ||
                    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
                    attr->path_mtu > IB_MTU_2048) ||
                    attr->path_mtu < IB_MTU_256 ||
                    attr->path_mtu > active_mtu) {
                        dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
                                attr->path_mtu);
                        goto out;
                }
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
                dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
                        attr->max_rd_atomic);
                goto out;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
                dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
                        attr->max_dest_rd_atomic);
                goto out;
        }

        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                ret = 0;
                goto out;
        }

        ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
                                    new_state);

out:
        mutex_unlock(&hr_qp->mutex);

        return ret;
}

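/*
 * Lock the send and receive CQs of a QP in a fixed order (by CQN) so that
 * two paths sharing CQs can never deadlock; a shared CQ is locked only once.
 * hns_roce_unlock_cqs() releases the locks in the matching reverse order.
 */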
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
                       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}
EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
                         struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
                         __releases(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}
EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);

static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
        return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_recv_wqe);

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_send_wqe);

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
        return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
                                        (n << hr_qp->sge.sge_shift));
}
EXPORT_SYMBOL_GPL(get_send_extend_sge);

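/*
 * Check whether posting @nreq more work requests would overflow the work
 * queue.  If the lockless check is inconclusive, re-read the producer and
 * consumer indices under the CQ lock to get a stable view.
 */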
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
                          struct ib_cq *ib_cq)
{
        struct hns_roce_cq *hr_cq;
        u32 cur;

        cur = hr_wq->head - hr_wq->tail;
        if (likely(cur + nreq < hr_wq->max_post))
                return false;

        hr_cq = to_hr_cq(ib_cq);
        spin_lock(&hr_cq->lock);
        cur = hr_wq->head - hr_wq->tail;
        spin_unlock(&hr_cq->lock);

        return cur + nreq >= hr_wq->max_post;
}
EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);

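/*
 * Initialise the driver's QP table: the lookup radix tree, its lock, and the
 * QPN bitmap with the low QPNs reserved for the special (GSI) QPs.
 */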
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        int reserved_from_top = 0;
        int ret;

        spin_lock_init(&qp_table->lock);
        INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

        /* Each port has two SQPs, so six ports reserve twelve QPNs in total */
        ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
                                   hr_dev->caps.num_qps - 1, SQP_NUM,
                                   reserved_from_top);
        if (ret) {
                dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
                        ret);
                return ret;
        }

        return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
        hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}