drivers/infiniband/hw/hns/hns_roce_hw_v2.c
/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

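/* Fill one hardware data segment with the lkey, address and length of an SGE */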
static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
                            struct ib_sge *sg)
{
        dseg->lkey = cpu_to_le32(sg->lkey);
        dseg->addr = cpu_to_le64(sg->addr);
        dseg->len  = cpu_to_le32(sg->length);
}

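/*
 * Build the fast-register WQE for IB_WR_REG_MR: encode the MR access flags,
 * PBL base address, length, rkey and IOVA into the RC send WQE, and fill the
 * FRMR segment with the PBL size and page size.
 */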
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                         struct hns_roce_wqe_frmr_seg *fseg,
                         const struct ib_reg_wr *wr)
{
        struct hns_roce_mr *mr = to_hr_mr(wr->mr);

        /* use ib_access_flags */
        roce_set_bit(rc_sq_wqe->byte_4,
                     V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
                     wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4,
                     V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
                     wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4,
                     V2_RC_FRMR_WQE_BYTE_4_RR_S,
                     wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4,
                     V2_RC_FRMR_WQE_BYTE_4_RW_S,
                     wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4,
                     V2_RC_FRMR_WQE_BYTE_4_LW_S,
                     wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);

        /* Data structure reuse may lead to confusion */
        rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
        rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);

        rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
        rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
        rc_sq_wqe->rkey = cpu_to_le32(wr->key);
        rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

        fseg->pbl_size = cpu_to_le32(mr->pbl_size);
        roce_set_field(fseg->mode_buf_pg_sz,
                       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
                       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
                       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
        roce_set_bit(fseg->mode_buf_pg_sz,
                     V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}

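/*
 * Fill the atomic segment: compare-and-swap carries both the swap and compare
 * values, while fetch-and-add only carries the addend.
 */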
static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
                           const struct ib_atomic_wr *wr)
{
        if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
                aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
                aseg->cmp_data  = cpu_to_le64(wr->compare_add);
        } else {
                aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
                aseg->cmp_data  = 0;
        }
}

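/*
 * Write the SGEs that do not fit in the WQE itself into the extended SGE
 * space, splitting the copy in two when it would cross a page boundary.
 */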
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
                           unsigned int *sge_ind)
{
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct ib_sge *sg;
        int num_in_wqe = 0;
        int extend_sge_num;
        int fi_sge_num;
        int se_sge_num;
        int shift;
        int i;

        if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
                num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
        extend_sge_num = wr->num_sge - num_in_wqe;
        sg = wr->sg_list + num_in_wqe;
        shift = qp->hr_buf.page_shift;

        /*
         * Check whether all of wr->num_sge SGEs fall in the same page.
         * If not, calculate how many SGEs go in the first page and how
         * many spill into the second.
         */
        dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
        fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
                      (uintptr_t)dseg) /
                      sizeof(struct hns_roce_v2_wqe_data_seg);
        if (extend_sge_num > fi_sge_num) {
                se_sge_num = extend_sge_num - fi_sge_num;
                for (i = 0; i < fi_sge_num; i++) {
                        set_data_seg_v2(dseg++, sg + i);
                        (*sge_ind)++;
                }
                dseg = get_send_extend_sge(qp,
                                           (*sge_ind) & (qp->sge.sge_cnt - 1));
                for (i = 0; i < se_sge_num; i++) {
                        set_data_seg_v2(dseg++, sg + fi_sge_num + i);
                        (*sge_ind)++;
                }
        } else {
                for (i = 0; i < extend_sge_num; i++) {
                        set_data_seg_v2(dseg++, sg + i);
                        (*sge_ind)++;
                }
        }
}

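/*
 * Fill the data segments of an RC/UC send WQE: either copy the payload
 * inline (IB_SEND_INLINE) or build the scatter/gather list, spilling SGEs
 * beyond the in-WQE limit into the extended SGE space.
 */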
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                             struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                             void *wqe, unsigned int *sge_ind,
                             const struct ib_send_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_v2_wqe_data_seg *dseg = wqe;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        int i;

        if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
                if (le32_to_cpu(rc_sq_wqe->msg_len) >
                    hr_dev->caps.max_sq_inline) {
                        *bad_wr = wr;
                        dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal\n",
                                hr_dev->caps.max_sq_inline,
                                le32_to_cpu(rc_sq_wqe->msg_len));
                        return -EINVAL;
                }

                if (wr->opcode == IB_WR_RDMA_READ) {
                        *bad_wr = wr;
                        dev_err(hr_dev->dev,
                                "inline data is not supported for RDMA READ!\n");
                        return -EINVAL;
                }

                for (i = 0; i < wr->num_sge; i++) {
                        memcpy(wqe, ((void *)wr->sg_list[i].addr),
                               wr->sg_list[i].length);
                        wqe += wr->sg_list[i].length;
                }

                roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
                             1);
        } else {
                if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
                        for (i = 0; i < wr->num_sge; i++) {
                                if (likely(wr->sg_list[i].length)) {
                                        set_data_seg_v2(dseg, wr->sg_list + i);
                                        dseg++;
                                }
                        }
                } else {
                        roce_set_field(rc_sq_wqe->byte_20,
                                     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
                                     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                                     (*sge_ind) & (qp->sge.sge_cnt - 1));

                        for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
                                if (likely(wr->sg_list[i].length)) {
                                        set_data_seg_v2(dseg, wr->sg_list + i);
                                        dseg++;
                                }
                        }

                        set_extend_sge(qp, wr, sge_ind);
                }

                roce_set_field(rc_sq_wqe->byte_16,
                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
        }

        return 0;
}

static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                                 const struct ib_qp_attr *attr,
                                 int attr_mask, enum ib_qp_state cur_state,
                                 enum ib_qp_state new_state);

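/*
 * Post a chain of send work requests to the SQ: build a UD or RC WQE for
 * each request, then ring the SQ doorbell once for the whole chain.
 */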
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
                                 const struct ib_send_wr *wr,
                                 const struct ib_send_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
        struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
        struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        struct hns_roce_wqe_frmr_seg *fseg;
        struct device *dev = hr_dev->dev;
        struct hns_roce_v2_db sq_db;
        struct ib_qp_attr attr;
        unsigned int sge_ind = 0;
        unsigned int owner_bit;
        unsigned long flags;
        unsigned int ind;
        void *wqe = NULL;
        bool loopback;
        int attr_mask;
        u32 tmp_len;
        int ret = 0;
        u32 hr_op;
        u8 *smac;
        int nreq;
        int i;

        if (unlikely(ibqp->qp_type != IB_QPT_RC &&
                     ibqp->qp_type != IB_QPT_GSI &&
                     ibqp->qp_type != IB_QPT_UD)) {
                dev_err(dev, "QP type (0x%x) is not supported!\n",
                        ibqp->qp_type);
                *bad_wr = wr;
                return -EOPNOTSUPP;
        }

        if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
                     qp->state == IB_QPS_RTR)) {
                dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
                *bad_wr = wr;
                return -EINVAL;
        }

        spin_lock_irqsave(&qp->sq.lock, flags);
        ind = qp->sq_next_wqe;
        sge_ind = qp->next_sge;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                if (unlikely(wr->num_sge > qp->sq.max_gs)) {
                        dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
                                wr->num_sge, qp->sq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
                qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
                                                                      wr->wr_id;

                owner_bit =
                       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
                tmp_len = 0;

                /* Process the WQE according to the QP type */
                if (ibqp->qp_type == IB_QPT_GSI) {
                        ud_sq_wqe = wqe;
                        memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

                        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
                                       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
                        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
                                       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
                        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
                                       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
                        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
                                       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
                        roce_set_field(ud_sq_wqe->byte_48,
                                       V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
                                       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
                                       ah->av.mac[4]);
                        roce_set_field(ud_sq_wqe->byte_48,
                                       V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
                                       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
                                       ah->av.mac[5]);

                        /* MAC loopback */
                        smac = (u8 *)hr_dev->dev_addr[qp->port];
                        loopback = ether_addr_equal_unaligned(ah->av.mac,
                                                              smac) ? 1 : 0;

                        roce_set_bit(ud_sq_wqe->byte_40,
                                     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);

                        roce_set_field(ud_sq_wqe->byte_4,
                                       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
                                       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
                                       HNS_ROCE_V2_WQE_OP_SEND);

                        for (i = 0; i < wr->num_sge; i++)
                                tmp_len += wr->sg_list[i].length;

                        ud_sq_wqe->msg_len =
                         cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);

                        switch (wr->opcode) {
                        case IB_WR_SEND_WITH_IMM:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                ud_sq_wqe->immtdata =
                                      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
                                break;
                        default:
                                ud_sq_wqe->immtdata = 0;
                                break;
                        }

                        /* Set sig attr */
                        roce_set_bit(ud_sq_wqe->byte_4,
                                   V2_UD_SEND_WQE_BYTE_4_CQE_S,
                                   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

                        /* Set se attr */
                        roce_set_bit(ud_sq_wqe->byte_4,
                                  V2_UD_SEND_WQE_BYTE_4_SE_S,
                                  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

                        roce_set_bit(ud_sq_wqe->byte_4,
                                     V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

                        roce_set_field(ud_sq_wqe->byte_16,
                                       V2_UD_SEND_WQE_BYTE_16_PD_M,
                                       V2_UD_SEND_WQE_BYTE_16_PD_S,
                                       to_hr_pd(ibqp->pd)->pdn);

                        roce_set_field(ud_sq_wqe->byte_16,
                                       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
                                       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
                                       wr->num_sge);

                        roce_set_field(ud_sq_wqe->byte_20,
                                     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
                                     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                                     sge_ind & (qp->sge.sge_cnt - 1));

                        roce_set_field(ud_sq_wqe->byte_24,
                                       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
                                       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
                        ud_sq_wqe->qkey =
                             cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
                             qp->qkey : ud_wr(wr)->remote_qkey);
                        roce_set_field(ud_sq_wqe->byte_32,
                                       V2_UD_SEND_WQE_BYTE_32_DQPN_M,
                                       V2_UD_SEND_WQE_BYTE_32_DQPN_S,
                                       ud_wr(wr)->remote_qpn);

                        roce_set_field(ud_sq_wqe->byte_36,
                                       V2_UD_SEND_WQE_BYTE_36_VLAN_M,
                                       V2_UD_SEND_WQE_BYTE_36_VLAN_S,
                                       le16_to_cpu(ah->av.vlan));
                        roce_set_field(ud_sq_wqe->byte_36,
                                       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
                                       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
                                       ah->av.hop_limit);
                        roce_set_field(ud_sq_wqe->byte_36,
                                       V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
                                       V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
                                      le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
                                      HNS_ROCE_TCLASS_SHIFT);
                        roce_set_field(ud_sq_wqe->byte_40,
                                       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
                                       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
                                      le32_to_cpu(ah->av.sl_tclass_flowlabel) &
                                      HNS_ROCE_FLOW_LABEL_MASK);
                        roce_set_field(ud_sq_wqe->byte_40,
                                       V2_UD_SEND_WQE_BYTE_40_SL_M,
                                       V2_UD_SEND_WQE_BYTE_40_SL_S,
                                      le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
                                      HNS_ROCE_SL_SHIFT);
                        roce_set_field(ud_sq_wqe->byte_40,
                                       V2_UD_SEND_WQE_BYTE_40_PORTN_M,
                                       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
                                       qp->port);

                        roce_set_bit(ud_sq_wqe->byte_40,
                                     V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
                                     ah->av.vlan_en ? 1 : 0);
                        roce_set_field(ud_sq_wqe->byte_48,
                                       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
                                       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
                                       hns_get_gid_index(hr_dev, qp->phy_port,
                                                         ah->av.gid_index));

                        memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
                               GID_LEN_V2);

                        set_extend_sge(qp, wr, &sge_ind);
                        ind++;
                } else if (ibqp->qp_type == IB_QPT_RC) {
                        rc_sq_wqe = wqe;
                        memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
                        for (i = 0; i < wr->num_sge; i++)
                                tmp_len += wr->sg_list[i].length;

                        rc_sq_wqe->msg_len =
                         cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);

                        switch (wr->opcode) {
                        case IB_WR_SEND_WITH_IMM:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                rc_sq_wqe->immtdata =
                                      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
                                break;
                        case IB_WR_SEND_WITH_INV:
                                rc_sq_wqe->inv_key =
                                        cpu_to_le32(wr->ex.invalidate_rkey);
                                break;
                        default:
                                rc_sq_wqe->immtdata = 0;
                                break;
                        }

                        roce_set_bit(rc_sq_wqe->byte_4,
                                     V2_RC_SEND_WQE_BYTE_4_FENCE_S,
                                     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

                        roce_set_bit(rc_sq_wqe->byte_4,
                                  V2_RC_SEND_WQE_BYTE_4_SE_S,
                                  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

                        roce_set_bit(rc_sq_wqe->byte_4,
                                   V2_RC_SEND_WQE_BYTE_4_CQE_S,
                                   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

                        roce_set_bit(rc_sq_wqe->byte_4,
                                     V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

                        wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
                        switch (wr->opcode) {
                        case IB_WR_RDMA_READ:
                                hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(rdma_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(rdma_wr(wr)->remote_addr);
                                break;
                        case IB_WR_RDMA_WRITE:
                                hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(rdma_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(rdma_wr(wr)->remote_addr);
                                break;
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(rdma_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(rdma_wr(wr)->remote_addr);
                                break;
                        case IB_WR_SEND:
                                hr_op = HNS_ROCE_V2_WQE_OP_SEND;
                                break;
                        case IB_WR_SEND_WITH_INV:
                                hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
                                break;
                        case IB_WR_SEND_WITH_IMM:
                                hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
                                break;
                        case IB_WR_LOCAL_INV:
                                hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
                                roce_set_bit(rc_sq_wqe->byte_4,
                                               V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
                                rc_sq_wqe->inv_key =
                                            cpu_to_le32(wr->ex.invalidate_rkey);
                                break;
                        case IB_WR_REG_MR:
                                hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
                                fseg = wqe;
                                set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
                                break;
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                                hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(atomic_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(atomic_wr(wr)->remote_addr);
                                break;
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                                hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(atomic_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(atomic_wr(wr)->remote_addr);
                                break;
                        case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
                                hr_op =
                                       HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
                                break;
                        case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
                                hr_op =
                                      HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
                                break;
                        default:
                                hr_op = HNS_ROCE_V2_WQE_OP_MASK;
                                break;
                        }

                        roce_set_field(rc_sq_wqe->byte_4,
                                       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
                                       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);

                        if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                            wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
                                struct hns_roce_v2_wqe_data_seg *dseg;

                                dseg = wqe;
                                set_data_seg_v2(dseg, wr->sg_list);
                                wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
                                set_atomic_seg(wqe, atomic_wr(wr));
                                roce_set_field(rc_sq_wqe->byte_16,
                                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
                                               wr->num_sge);
                        } else if (wr->opcode != IB_WR_REG_MR) {
                                ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
                                                        wqe, &sge_ind, bad_wr);
                                if (ret)
                                        goto out;
                        }

                        ind++;
                } else {
                        dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
                        spin_unlock_irqrestore(&qp->sq.lock, flags);
                        *bad_wr = wr;
                        return -EOPNOTSUPP;
                }
        }

out:
        if (likely(nreq)) {
                qp->sq.head += nreq;
                /* Memory barrier */
                wmb();

                sq_db.byte_4 = 0;
                sq_db.parameter = 0;

                roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
                               V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
                roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
                               V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
                roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
                               V2_DB_PARAMETER_IDX_S,
                               qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
                roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
                               V2_DB_PARAMETER_SL_S, qp->sl);

                hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);

                qp->sq_next_wqe = ind;
                qp->next_sge = sge_ind;

                if (qp->state == IB_QPS_ERR) {
                        attr_mask = IB_QP_STATE;
                        attr.qp_state = IB_QPS_ERR;

                        ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
                                                    qp->state, IB_QPS_ERR);
                        if (ret) {
                                spin_unlock_irqrestore(&qp->sq.lock, flags);
                                *bad_wr = wr;
                                return ret;
                        }
                }
        }

        spin_unlock_irqrestore(&qp->sq.lock, flags);

        return ret;
}

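/*
 * Post a chain of receive work requests to the RQ, terminate each SGE list
 * with an invalid-lkey sentinel, and update the RQ doorbell record.
 */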
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
                                 const struct ib_recv_wr *wr,
                                 const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct hns_roce_rinl_sge *sge_list;
        struct device *dev = hr_dev->dev;
        struct ib_qp_attr attr;
        unsigned long flags;
        void *wqe = NULL;
        int attr_mask;
        int ret = 0;
        int nreq;
        int ind;
        int i;

        spin_lock_irqsave(&hr_qp->rq.lock, flags);
        ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

        if (hr_qp->state == IB_QPS_RESET) {
                spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
                *bad_wr = wr;
                return -EINVAL;
        }

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
                        hr_qp->ibqp.recv_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
                        dev_err(dev, "rq: num_sge=%d > qp->rq.max_gs=%d\n",
                                wr->num_sge, hr_qp->rq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = get_recv_wqe(hr_qp, ind);
                dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
                for (i = 0; i < wr->num_sge; i++) {
                        if (!wr->sg_list[i].length)
                                continue;
                        set_data_seg_v2(dseg, wr->sg_list + i);
                        dseg++;
                }

                if (i < hr_qp->rq.max_gs) {
                        dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
                        dseg->addr = 0;
                }

                /* rq support inline data */
                if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
                        sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
                        hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
                                                               (u32)wr->num_sge;
                        for (i = 0; i < wr->num_sge; i++) {
                                sge_list[i].addr =
                                               (void *)(u64)wr->sg_list[i].addr;
                                sge_list[i].len = wr->sg_list[i].length;
                        }
                }

                hr_qp->rq.wrid[ind] = wr->wr_id;

                ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
        }

out:
        if (likely(nreq)) {
                hr_qp->rq.head += nreq;
                /* Memory barrier */
                wmb();

                *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;

                if (hr_qp->state == IB_QPS_ERR) {
                        attr_mask = IB_QP_STATE;
                        attr.qp_state = IB_QPS_ERR;

                        ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
                                                    attr_mask, hr_qp->state,
                                                    IB_QPS_ERR);
                        if (ret) {
                                spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
                                *bad_wr = wr;
                                return ret;
                        }
                }
        }
        spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

        return ret;
}

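/* Number of free descriptors left in a command queue ring */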
static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;
        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}

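/* Allocate and DMA-map the descriptor array of a command queue ring */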
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

        ring->desc = kzalloc(size, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
                                             DMA_BIDIRECTIONAL);
        if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
                         ring->desc_num * sizeof(struct hns_roce_cmq_desc),
                         DMA_BIDIRECTIONAL);

        ring->desc_dma_addr = 0;
        kfree(ring->desc);
}

static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
                                            &priv->cmq.csq : &priv->cmq.crq;

        ring->flag = ring_type;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;

        return hns_roce_alloc_cmq_desc(hr_dev, ring);
}

static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
                                            &priv->cmq.csq : &priv->cmq.crq;
        dma_addr_t dma = ring->desc_dma_addr;

        if (ring_type == TYPE_CSQ) {
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
                          (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
                           HNS_ROCE_CMQ_ENABLE);
                roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
        } else {
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
                          (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
                           HNS_ROCE_CMQ_ENABLE);
                roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
        }
}

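/* Allocate the CSQ/CRQ rings and program their base address registers */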
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        int ret;

        /* Setup the queue entries for command queue */
        priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
        priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

        /* Setup the lock for command queue */
        spin_lock_init(&priv->cmq.csq.lock);
        spin_lock_init(&priv->cmq.crq.lock);

        /* Setup Tx write back timeout */
        priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

        /* Init CSQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
        if (ret) {
                dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
                return ret;
        }

        /* Init CRQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
        if (ret) {
                dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
                goto err_crq;
        }

        /* Init CSQ REG */
        hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

        /* Init CRQ REG */
        hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

        return 0;

err_crq:
        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

        return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;

        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}

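/* Initialize a command queue descriptor with opcode and read/write flag */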
static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
                                          enum hns_roce_opcode_type opcode,
                                          bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag =
                cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

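/* Check whether the firmware has consumed all posted CSQ descriptors */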
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);

        return head == priv->cmq.csq.next_to_use;
}

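/* Reclaim CSQ descriptors that the firmware has already consumed */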
static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        struct hns_roce_cmq_desc *desc;
        u16 ntc = csq->next_to_clean;
        u32 head;
        int clean = 0;

        desc = &csq->desc[ntc];
        head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
        while (head != ntc) {
                memset(desc, 0, sizeof(*desc));
                ntc++;
                if (ntc == csq->desc_num)
                        ntc = 0;
                desc = &csq->desc[ntc];
                clean++;
        }
        csq->next_to_clean = ntc;

        return clean;
}

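/*
 * Copy the caller's descriptors into the CSQ, ring the tail doorbell and,
 * for synchronous commands, poll the head register until the firmware has
 * consumed them, then collect the per-descriptor return codes.
 */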
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                             struct hns_roce_cmq_desc *desc, int num)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        struct hns_roce_cmq_desc *desc_to_use;
        bool complete = false;
        u32 timeout = 0;
        int handle = 0;
        u16 desc_ret;
        int ret = 0;
        int ntc;

        if (hr_dev->is_reset)
                return 0;

        spin_lock_bh(&csq->lock);

        if (num > hns_roce_cmq_space(csq)) {
                spin_unlock_bh(&csq->lock);
                return -EBUSY;
        }

        /*
         * Record the location of desc in the cmq for this time
         * which will be used by hardware for write back.
         */
        ntc = csq->next_to_use;

        while (handle < num) {
                desc_to_use = &csq->desc[csq->next_to_use];
                *desc_to_use = desc[handle];
                dev_dbg(hr_dev->dev, "set cmq desc:\n");
                csq->next_to_use++;
                if (csq->next_to_use == csq->desc_num)
                        csq->next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

        /*
         * If the command is sync, wait for the firmware to write back;
         * if multiple descriptors are sent, use the first one to check.
         */
        if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
                do {
                        if (hns_roce_cmq_csq_done(hr_dev))
                                break;
                        udelay(1);
                        timeout++;
                } while (timeout < priv->cmq.tx_timeout);
        }

        if (hns_roce_cmq_csq_done(hr_dev)) {
                complete = true;
                handle = 0;
                while (handle < num) {
                        /* get the result of hardware write back */
                        desc_to_use = &csq->desc[ntc];
                        desc[handle] = *desc_to_use;
                        dev_dbg(hr_dev->dev, "Get cmq desc:\n");
                        desc_ret = le16_to_cpu(desc[handle].retval);
                        if (desc_ret == CMD_EXEC_SUCCESS)
                                ret = 0;
                        else
                                ret = -EIO;
                        priv->cmq.last_status = desc_ret;
                        ntc++;
                        handle++;
                        if (ntc == csq->desc_num)
                                ntc = 0;
                }
        }

        if (!complete)
                ret = -EAGAIN;

        /* clean the command send queue */
        handle = hns_roce_cmq_csq_clean(hr_dev);
        if (handle != num)
                dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
                         handle, num);

        spin_unlock_bh(&csq->lock);

        return ret;
}

static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_query_version *resp;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        resp = (struct hns_roce_query_version *)desc.data;
        hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
        hr_dev->vendor_id = hr_dev->pci_dev->vendor;

        return 0;
}

static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_query_fw_info *resp;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        resp = (struct hns_roce_query_fw_info *)desc.data;
        hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

        return 0;
}

static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cfg_global_param *req;
        struct hns_roce_cmq_desc desc;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
                                      false);

        req = (struct hns_roce_cfg_global_param *)desc.data;
        memset(req, 0, sizeof(*req));
        roce_set_field(req->time_cfg_udp_port,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
        roce_set_field(req->time_cfg_udp_port,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc[2];
        struct hns_roce_pf_res_a *req_a;
        struct hns_roce_pf_res_b *req_b;
        int ret;
        int i;

        for (i = 0; i < 2; i++) {
                hns_roce_cmq_setup_basic_desc(&desc[i],
                                              HNS_ROCE_OPC_QUERY_PF_RES, true);

                if (i == 0)
                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
                else
                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
        }

        ret = hns_roce_cmq_send(hr_dev, desc, 2);
        if (ret)
                return ret;

        req_a = (struct hns_roce_pf_res_a *)desc[0].data;
        req_b = (struct hns_roce_pf_res_b *)desc[1].data;

        hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
                                                 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
                                                 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
        hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
                                                PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
                                                PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
        hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
                                                 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
                                                 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
        hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
                                                 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
                                                 PF_RES_DATA_4_PF_MPT_BT_NUM_S);

        hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
                                             PF_RES_DATA_3_PF_SL_NUM_M,
                                             PF_RES_DATA_3_PF_SL_NUM_S);

        return 0;
}

static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc[2];
        struct hns_roce_vf_res_a *req_a;
        struct hns_roce_vf_res_b *req_b;
        int i;

        req_a = (struct hns_roce_vf_res_a *)desc[0].data;
        req_b = (struct hns_roce_vf_res_b *)desc[1].data;
        memset(req_a, 0, sizeof(*req_a));
        memset(req_b, 0, sizeof(*req_b));
        for (i = 0; i < 2; i++) {
                hns_roce_cmq_setup_basic_desc(&desc[i],
                                              HNS_ROCE_OPC_ALLOC_VF_RES, false);

                if (i == 0)
                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
                else
                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

                if (i == 0) {
                        roce_set_field(req_a->vf_qpc_bt_idx_num,
                                       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
                                       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
                        roce_set_field(req_a->vf_qpc_bt_idx_num,
                                       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
                                       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
                                       HNS_ROCE_VF_QPC_BT_NUM);

                        roce_set_field(req_a->vf_srqc_bt_idx_num,
                                       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
                                       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
                        roce_set_field(req_a->vf_srqc_bt_idx_num,
                                       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
                                       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
                                       HNS_ROCE_VF_SRQC_BT_NUM);

                        roce_set_field(req_a->vf_cqc_bt_idx_num,
                                       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
                                       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
                        roce_set_field(req_a->vf_cqc_bt_idx_num,
                                       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
                                       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
                                       HNS_ROCE_VF_CQC_BT_NUM);

                        roce_set_field(req_a->vf_mpt_bt_idx_num,
                                       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
                                       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
                        roce_set_field(req_a->vf_mpt_bt_idx_num,
                                       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
                                       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
                                       HNS_ROCE_VF_MPT_BT_NUM);

                        roce_set_field(req_a->vf_eqc_bt_idx_num,
                                       VF_RES_A_DATA_5_VF_EQC_IDX_M,
                                       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
                        roce_set_field(req_a->vf_eqc_bt_idx_num,
                                       VF_RES_A_DATA_5_VF_EQC_NUM_M,
                                       VF_RES_A_DATA_5_VF_EQC_NUM_S,
                                       HNS_ROCE_VF_EQC_NUM);
                } else {
                        roce_set_field(req_b->vf_smac_idx_num,
                                       VF_RES_B_DATA_1_VF_SMAC_IDX_M,
                                       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
                        roce_set_field(req_b->vf_smac_idx_num,
                                       VF_RES_B_DATA_1_VF_SMAC_NUM_M,
                                       VF_RES_B_DATA_1_VF_SMAC_NUM_S,
                                       HNS_ROCE_VF_SMAC_NUM);

                        roce_set_field(req_b->vf_sgid_idx_num,
                                       VF_RES_B_DATA_2_VF_SGID_IDX_M,
                                       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
                        roce_set_field(req_b->vf_sgid_idx_num,
                                       VF_RES_B_DATA_2_VF_SGID_NUM_M,
                                       VF_RES_B_DATA_2_VF_SGID_NUM_S,
                                       HNS_ROCE_VF_SGID_NUM);

                        roce_set_field(req_b->vf_qid_idx_sl_num,
                                       VF_RES_B_DATA_3_VF_QID_IDX_M,
                                       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
                        roce_set_field(req_b->vf_qid_idx_sl_num,
                                       VF_RES_B_DATA_3_VF_SL_NUM_M,
                                       VF_RES_B_DATA_3_VF_SL_NUM_S,
                                       HNS_ROCE_VF_SL_NUM);
                }
        }

        return hns_roce_cmq_send(hr_dev, desc, 2);
}

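/*
 * Configure the base address table attributes (page sizes and hop numbers)
 * for QPC, SRQC, CQC and MPT.
 */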
1175 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1176 {
1177         u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
1178         u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
1179         u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
1180         u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
1181         struct hns_roce_cfg_bt_attr *req;
1182         struct hns_roce_cmq_desc desc;
1183
1184         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1185         req = (struct hns_roce_cfg_bt_attr *)desc.data;
1186         memset(req, 0, sizeof(*req));
1187
1188         roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
1189                        CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
1190                        hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1191         roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
1192                        CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
1193                        hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1194         roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
1195                        CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
1196                        qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
1197
1198         roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
1199                        CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
1200                        hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1201         roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
1202                        CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
1203                        hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1204         roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
1205                        CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
1206                        srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
1207
1208         roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
1209                        CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
1210                        hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1211         roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
1212                        CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
1213                        hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1214         roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
1215                        CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
1216                        cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
1217
1218         roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
1219                        CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
1220                        hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1221         roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
1222                        CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
1223                        hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1224         roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
1225                        CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
1226                        mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
1227
1228         return hns_roce_cmq_send(hr_dev, &desc, 1);
1229 }
1230
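/*
 * One-time device profiling: query the hardware and firmware versions,
 * configure the global parameters, query the resources owned by this PF
 * and carve out the VF share, then fill in the static capability limits
 * and push the context table layout to the hardware via
 * hns_roce_v2_set_bt().
 */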
1231 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
1232 {
1233         struct hns_roce_caps *caps = &hr_dev->caps;
1234         int ret;
1235
1236         ret = hns_roce_cmq_query_hw_info(hr_dev);
1237         if (ret) {
1238                 dev_err(hr_dev->dev, "Query hardware version failed, ret = %d.\n",
1239                         ret);
1240                 return ret;
1241         }
1242
1243         ret = hns_roce_query_fw_ver(hr_dev);
1244         if (ret) {
1245                 dev_err(hr_dev->dev, "Query firmware version failed, ret = %d.\n",
1246                         ret);
1247                 return ret;
1248         }
1249
1250         ret = hns_roce_config_global_param(hr_dev);
1251         if (ret) {
1252                 dev_err(hr_dev->dev, "Configure global param failed, ret = %d.\n",
1253                         ret);
1254                 return ret;
1255         }
1256
1257         /* Query the resources owned by this PF */
1258         ret = hns_roce_query_pf_resource(hr_dev);
1259         if (ret) {
1260                 dev_err(hr_dev->dev, "Query pf resource failed, ret = %d.\n",
1261                         ret);
1262                 return ret;
1263         }
1264
1265         ret = hns_roce_alloc_vf_resource(hr_dev);
1266         if (ret) {
1267                 dev_err(hr_dev->dev, "Allocate vf resource failed, ret = %d.\n",
1268                         ret);
1269                 return ret;
1270         }
1271
1273         hr_dev->vendor_part_id = hr_dev->pci_dev->device;
1274         hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
1275
1276         caps->num_qps           = HNS_ROCE_V2_MAX_QP_NUM;
1277         caps->max_wqes          = HNS_ROCE_V2_MAX_WQE_NUM;
1278         caps->num_cqs           = HNS_ROCE_V2_MAX_CQ_NUM;
1279         caps->max_cqes          = HNS_ROCE_V2_MAX_CQE_NUM;
1280         caps->max_sq_sg         = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1281         caps->max_extend_sg     = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
1282         caps->max_rq_sg         = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1283         caps->max_sq_inline     = HNS_ROCE_V2_MAX_SQ_INLINE;
1284         caps->num_uars          = HNS_ROCE_V2_UAR_NUM;
1285         caps->phy_num_uars      = HNS_ROCE_V2_PHY_UAR_NUM;
1286         caps->num_aeq_vectors   = HNS_ROCE_V2_AEQE_VEC_NUM;
1287         caps->num_comp_vectors  = HNS_ROCE_V2_COMP_VEC_NUM;
1288         caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1289         caps->num_mtpts         = HNS_ROCE_V2_MAX_MTPT_NUM;
1290         caps->num_mtt_segs      = HNS_ROCE_V2_MAX_MTT_SEGS;
1291         caps->num_cqe_segs      = HNS_ROCE_V2_MAX_CQE_SEGS;
1292         caps->num_pds           = HNS_ROCE_V2_MAX_PD_NUM;
1293         caps->max_qp_init_rdma  = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1294         caps->max_qp_dest_rdma  = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1295         caps->max_sq_desc_sz    = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1296         caps->max_rq_desc_sz    = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1297         caps->max_srq_desc_sz   = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1298         caps->qpc_entry_sz      = HNS_ROCE_V2_QPC_ENTRY_SZ;
1299         caps->irrl_entry_sz     = HNS_ROCE_V2_IRRL_ENTRY_SZ;
1300         caps->trrl_entry_sz     = HNS_ROCE_V2_TRRL_ENTRY_SZ;
1301         caps->cqc_entry_sz      = HNS_ROCE_V2_CQC_ENTRY_SZ;
1302         caps->mtpt_entry_sz     = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1303         caps->mtt_entry_sz      = HNS_ROCE_V2_MTT_ENTRY_SZ;
1304         caps->cq_entry_sz       = HNS_ROCE_V2_CQE_ENTRY_SIZE;
1305         caps->page_size_cap     = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1306         caps->reserved_lkey     = 0;
1307         caps->reserved_pds      = 0;
1308         caps->reserved_mrws     = 1;
1309         caps->reserved_uars     = 0;
1310         caps->reserved_cqs      = 0;
1311         caps->reserved_qps      = HNS_ROCE_V2_RSV_QPS;
1312
1313         caps->qpc_ba_pg_sz      = 0;
1314         caps->qpc_buf_pg_sz     = 0;
1315         caps->qpc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
1316         caps->srqc_ba_pg_sz     = 0;
1317         caps->srqc_buf_pg_sz    = 0;
1318         caps->srqc_hop_num      = HNS_ROCE_HOP_NUM_0;
1319         caps->cqc_ba_pg_sz      = 0;
1320         caps->cqc_buf_pg_sz     = 0;
1321         caps->cqc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
1322         caps->mpt_ba_pg_sz      = 0;
1323         caps->mpt_buf_pg_sz     = 0;
1324         caps->mpt_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
1325         caps->pbl_ba_pg_sz      = 0;
1326         caps->pbl_buf_pg_sz     = 0;
1327         caps->pbl_hop_num       = HNS_ROCE_PBL_HOP_NUM;
1328         caps->mtt_ba_pg_sz      = 0;
1329         caps->mtt_buf_pg_sz     = 0;
1330         caps->mtt_hop_num       = HNS_ROCE_MTT_HOP_NUM;
1331         caps->cqe_ba_pg_sz      = 0;
1332         caps->cqe_buf_pg_sz     = 0;
1333         caps->cqe_hop_num       = HNS_ROCE_CQE_HOP_NUM;
1334         caps->eqe_ba_pg_sz      = 0;
1335         caps->eqe_buf_pg_sz     = 0;
1336         caps->eqe_hop_num       = HNS_ROCE_EQE_HOP_NUM;
1337         caps->tsq_buf_pg_sz     = 0;
1338         caps->chunk_sz          = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
1339
1340         caps->flags             = HNS_ROCE_CAP_FLAG_REREG_MR |
1341                                   HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
1342                                   HNS_ROCE_CAP_FLAG_RQ_INLINE |
1343                                   HNS_ROCE_CAP_FLAG_RECORD_DB |
1344                                   HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
1345
1346         if (hr_dev->pci_dev->revision == 0x21)
1347                 caps->flags |= HNS_ROCE_CAP_FLAG_MW |
1348                                HNS_ROCE_CAP_FLAG_FRMR;
1349
1350         caps->pkey_table_len[0] = 1;
1351         caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
1352         caps->ceqe_depth        = HNS_ROCE_V2_COMP_EQE_NUM;
1353         caps->aeqe_depth        = HNS_ROCE_V2_ASYNC_EQE_NUM;
1354         caps->local_ca_ack_delay = 0;
1355         caps->max_mtu = IB_MTU_4096;
1356
1357         if (hr_dev->pci_dev->revision == 0x21)
1358                 caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC;
1359
1360         ret = hns_roce_v2_set_bt(hr_dev);
1361         if (ret)
1362                 dev_err(hr_dev->dev, "Configure bt attribute failed, ret = %d.\n",
1363                         ret);
1364
1365         return ret;
1366 }
1367
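/*
 * Describe a link table (TSQ or TPQ) to the hardware.  The configuration
 * does not fit in a single command, so two CMQ descriptors are chained:
 * desc[0] carries HNS_ROCE_CMD_FLAG_NEXT and holds the table base
 * address, depth, page size and head entry, while desc[1] holds the tail
 * entry.  Setting INIT_EN in desc[0] presumably tells the hardware to
 * initialize the queue from this table.
 */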
1368 static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
1369                                       enum hns_roce_link_table_type type)
1370 {
1371         struct hns_roce_cmq_desc desc[2];
1372         struct hns_roce_cfg_llm_a *req_a =
1373                                 (struct hns_roce_cfg_llm_a *)desc[0].data;
1374         struct hns_roce_cfg_llm_b *req_b =
1375                                 (struct hns_roce_cfg_llm_b *)desc[1].data;
1376         struct hns_roce_v2_priv *priv = hr_dev->priv;
1377         struct hns_roce_link_table *link_tbl;
1378         struct hns_roce_link_table_entry *entry;
1379         enum hns_roce_opcode_type opcode;
1380         u32 page_num;
1381         int i;
1382
1383         switch (type) {
1384         case TSQ_LINK_TABLE:
1385                 link_tbl = &priv->tsq;
1386                 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
1387                 break;
1388         case TPQ_LINK_TABLE:
1389                 link_tbl = &priv->tpq;
1390                 opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
1391                 break;
1392         default:
1393                 return -EINVAL;
1394         }
1395
1396         page_num = link_tbl->npages;
1397         entry = link_tbl->table.buf;
1398         memset(req_a, 0, sizeof(*req_a));
1399         memset(req_b, 0, sizeof(*req_b));
1400
1401         for (i = 0; i < 2; i++) {
1402                 hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
1403
1404                 if (i == 0)
1405                         desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1406                 else
1407                         desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1408
1409                 if (i == 0) {
1410                         req_a->base_addr_l = link_tbl->table.map & 0xffffffff;
1411                         req_a->base_addr_h = (link_tbl->table.map >> 32) &
1412                                              0xffffffff;
1413                         roce_set_field(req_a->depth_pgsz_init_en,
1414                                        CFG_LLM_QUE_DEPTH_M,
1415                                        CFG_LLM_QUE_DEPTH_S,
1416                                        link_tbl->npages);
1417                         roce_set_field(req_a->depth_pgsz_init_en,
1418                                        CFG_LLM_QUE_PGSZ_M,
1419                                        CFG_LLM_QUE_PGSZ_S,
1420                                        link_tbl->pg_sz);
1421                         req_a->head_ba_l = entry[0].blk_ba0;
1422                         req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr;
1423                         roce_set_field(req_a->head_ptr,
1424                                        CFG_LLM_HEAD_PTR_M,
1425                                        CFG_LLM_HEAD_PTR_S, 0);
1426                 } else {
1427                         req_b->tail_ba_l = entry[page_num - 1].blk_ba0;
1428                         roce_set_field(req_b->tail_ba_h,
1429                                        CFG_LLM_TAIL_BA_H_M,
1430                                        CFG_LLM_TAIL_BA_H_S,
1431                                        entry[page_num - 1].blk_ba1_nxt_ptr &
1432                                        HNS_ROCE_LINK_TABLE_BA1_M);
1433                         roce_set_field(req_b->tail_ptr,
1434                                        CFG_LLM_TAIL_PTR_M,
1435                                        CFG_LLM_TAIL_PTR_S,
1436                                        (entry[page_num - 2].blk_ba1_nxt_ptr &
1437                                        HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
1438                                        HNS_ROCE_LINK_TABLE_NXT_PTR_S);
1439                 }
1440         }
1441         roce_set_field(req_a->depth_pgsz_init_en,
1442                        CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);
1443
1444         return hns_roce_cmq_send(hr_dev, desc, 2);
1445 }
1446
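/*
 * Allocate a link table and its backing pages.  The table itself is a
 * DMA-coherent array of hns_roce_link_table_entry, one entry per data
 * page; each entry stores the page bus address split into blk_ba0
 * (bits 12..43) and blk_ba1 (bits 44 and up), plus a nxt_ptr index that
 * chains the entries into a singly linked list.  TSQ and TPQ differ only
 * in the page size used and in how the required page count is estimated.
 */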
1447 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
1448                                     enum hns_roce_link_table_type type)
1449 {
1450         struct hns_roce_v2_priv *priv = hr_dev->priv;
1451         struct hns_roce_link_table *link_tbl;
1452         struct hns_roce_link_table_entry *entry;
1453         struct device *dev = hr_dev->dev;
1454         u32 buf_chk_sz;
1455         dma_addr_t t;
1456         int func_num = 1;
1457         int pg_num_a;
1458         int pg_num_b;
1459         int pg_num;
1460         int size;
1461         int i;
1462
1463         switch (type) {
1464         case TSQ_LINK_TABLE:
1465                 link_tbl = &priv->tsq;
1466                 buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
1467                 pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
1468                 pg_num_b = hr_dev->caps.sl_num * 4 + 2;
1469                 break;
1470         case TPQ_LINK_TABLE:
1471                 link_tbl = &priv->tpq;
1472                 buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
1473                 pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
1474                 pg_num_b = 2 * 4 * func_num + 2;
1475                 break;
1476         default:
1477                 return -EINVAL;
1478         }
1479
1480         pg_num = max(pg_num_a, pg_num_b);
1481         size = pg_num * sizeof(struct hns_roce_link_table_entry);
1482
1483         link_tbl->table.buf = dma_alloc_coherent(dev, size,
1484                                                  &link_tbl->table.map,
1485                                                  GFP_KERNEL);
1486         if (!link_tbl->table.buf)
1487                 goto out;
1488
1489         link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
1490                                     GFP_KERNEL);
1491         if (!link_tbl->pg_list)
1492                 goto err_kcalloc_failed;
1493
1494         entry = link_tbl->table.buf;
1495         for (i = 0; i < pg_num; ++i) {
1496                 link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
1497                                                               &t, GFP_KERNEL);
1498                 if (!link_tbl->pg_list[i].buf)
1499                         goto err_alloc_buf_failed;
1500
1501                 link_tbl->pg_list[i].map = t;
1502                 memset(link_tbl->pg_list[i].buf, 0, buf_chk_sz);
1503
1504                 entry[i].blk_ba0 = (t >> 12) & 0xffffffff;
1505                 roce_set_field(entry[i].blk_ba1_nxt_ptr,
1506                                HNS_ROCE_LINK_TABLE_BA1_M,
1507                                HNS_ROCE_LINK_TABLE_BA1_S,
1508                                t >> 44);
1509
1510                 if (i < (pg_num - 1))
1511                         roce_set_field(entry[i].blk_ba1_nxt_ptr,
1512                                        HNS_ROCE_LINK_TABLE_NXT_PTR_M,
1513                                        HNS_ROCE_LINK_TABLE_NXT_PTR_S,
1514                                        i + 1);
1515         }
1516         link_tbl->npages = pg_num;
1517         link_tbl->pg_sz = buf_chk_sz;
1518
1519         return hns_roce_config_link_table(hr_dev, type);
1520
1521 err_alloc_buf_failed:
1522         for (i -= 1; i >= 0; i--)
1523                 dma_free_coherent(dev, buf_chk_sz,
1524                                   link_tbl->pg_list[i].buf,
1525                                   link_tbl->pg_list[i].map);
1526         kfree(link_tbl->pg_list);
1527
1528 err_kcalloc_failed:
1529         dma_free_coherent(dev, size, link_tbl->table.buf,
1530                           link_tbl->table.map);
1531
1532 out:
1533         return -ENOMEM;
1534 }
1535
1536 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
1537                                      struct hns_roce_link_table *link_tbl)
1538 {
1539         struct device *dev = hr_dev->dev;
1540         int size;
1541         int i;
1542
1543         size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
1544
1545         for (i = 0; i < link_tbl->npages; ++i)
1546                 if (link_tbl->pg_list[i].buf)
1547                         dma_free_coherent(dev, link_tbl->pg_sz,
1548                                           link_tbl->pg_list[i].buf,
1549                                           link_tbl->pg_list[i].map);
1550         kfree(link_tbl->pg_list);
1551
1552         dma_free_coherent(dev, size, link_tbl->table.buf,
1553                           link_tbl->table.map);
1554 }
1555
1556 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
1557 {
1558         struct hns_roce_v2_priv *priv = hr_dev->priv;
1559         int ret;
1560
1561         /* TSQ includes SQ doorbell and ack doorbell */
1562         ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
1563         if (ret) {
1564                 dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
1565                 return ret;
1566         }
1567
1568         ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
1569         if (ret) {
1570                 dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
1571                 goto err_tpq_init_failed;
1572         }
1573
1574         return 0;
1575
1576 err_tpq_init_failed:
1577         hns_roce_free_link_table(hr_dev, &priv->tsq);
1578
1579         return ret;
1580 }
1581
1582 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
1583 {
1584         struct hns_roce_v2_priv *priv = hr_dev->priv;
1585
1586         hns_roce_free_link_table(hr_dev, &priv->tpq);
1587         hns_roce_free_link_table(hr_dev, &priv->tsq);
1588 }
1589
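/*
 * Mailbox helpers: the VF mailbox status register exposes a "HW run" bit
 * while the hardware is still busy with the previous command, and a
 * status field (0x1 on success, checked in hns_roce_v2_chk_mbox()) once
 * it completes.
 */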
1590 static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
1591 {
1592         u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
1593
1594         return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
1595 }
1596
1597 static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
1598 {
1599         u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
1600
1601         return status & HNS_ROCE_HW_MB_STATUS_MASK;
1602 }
1603
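/*
 * Post a mailbox command.  The command block at ROCEE_VF_MB_CFG0_REG is
 * laid out in 32-bit register slots as:
 *
 *   hcr[0..1]: in_param  (64 bit)
 *   hcr[2..3]: out_param (64 bit)
 *   hcr[4]:    in_modifier (tag) and opcode
 *   hcr[5]:    event flag and token
 *
 * The device presumably latches the command on the final two register
 * writes, so the wmb() orders the parameter writes ahead of them.
 */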
1604 static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
1605                                  u64 out_param, u32 in_modifier, u8 op_modifier,
1606                                  u16 op, u16 token, int event)
1607 {
1608         struct device *dev = hr_dev->dev;
1609         u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base +
1610                                            ROCEE_VF_MB_CFG0_REG);
1611         unsigned long end;
1612         u32 val0 = 0;
1613         u32 val1 = 0;
1614
1615         end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
1616         while (hns_roce_v2_cmd_pending(hr_dev)) {
1617                 if (time_after(jiffies, end)) {
1618                         dev_dbg(dev, "jiffies=%lu end=%lu\n", jiffies, end);
1620                         return -EAGAIN;
1621                 }
1622                 cond_resched();
1623         }
1624
1625         roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK,
1626                        HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier);
1627         roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK,
1628                        HNS_ROCE_VF_MB4_CMD_SHIFT, op);
1629         roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK,
1630                        HNS_ROCE_VF_MB5_EVENT_SHIFT, event);
1631         roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
1632                        HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);
1633
1634         writeq(in_param, hcr + 0);
1635         writeq(out_param, hcr + 2);
1636
1637         /* Order the parameter writes above before the doorbell writes below */
1638         wmb();
1639
1640         writel(val0, hcr + 4);
1641         writel(val1, hcr + 5);
1642
1643         mmiowb();
1644
1645         return 0;
1646 }
1647
1648 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
1649                                 unsigned long timeout)
1650 {
1651         struct device *dev = hr_dev->dev;
1652         unsigned long end = 0;
1653         u32 status;
1654
1655         end = msecs_to_jiffies(timeout) + jiffies;
1656         while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
1657                 cond_resched();
1658
1659         if (hns_roce_v2_cmd_pending(hr_dev)) {
1660                 dev_err(dev, "[cmd_poll] mailbox command timed out!\n");
1661                 return -ETIMEDOUT;
1662         }
1663
1664         status = hns_roce_v2_cmd_complete(hr_dev);
1665         if (status != 0x1) {
1666                 dev_err(dev, "mailbox status 0x%x!\n", status);
1667                 return -EBUSY;
1668         }
1669
1670         return 0;
1671 }
1672
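/*
 * Write one 128-bit GID into the hardware SGID table.  The GID is copied
 * 32 bits at a time from gid->raw into the vf_sgid_l/ml/mh/h words of
 * the command, together with the table index and the SGID type.
 */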
1673 static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
1674                                       int gid_index, const union ib_gid *gid,
1675                                       enum hns_roce_sgid_type sgid_type)
1676 {
1677         struct hns_roce_cmq_desc desc;
1678         struct hns_roce_cfg_sgid_tb *sgid_tb =
1679                                     (struct hns_roce_cfg_sgid_tb *)desc.data;
1680         u32 *p;
1681
1682         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
1683
1684         roce_set_field(sgid_tb->table_idx_rsv,
1685                        CFG_SGID_TB_TABLE_IDX_M,
1686                        CFG_SGID_TB_TABLE_IDX_S, gid_index);
1687         roce_set_field(sgid_tb->vf_sgid_type_rsv,
1688                        CFG_SGID_TB_VF_SGID_TYPE_M,
1689                        CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
1690
1691         p = (u32 *)&gid->raw[0];
1692         sgid_tb->vf_sgid_l = cpu_to_le32(*p);
1693
1694         p = (u32 *)&gid->raw[4];
1695         sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
1696
1697         p = (u32 *)&gid->raw[8];
1698         sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
1699
1700         p = (u32 *)&gid->raw[0xc];
1701         sgid_tb->vf_sgid_h = cpu_to_le32(*p);
1702
1703         return hns_roce_cmq_send(hr_dev, &desc, 1);
1704 }
1705
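/*
 * The SGID type is chosen from the GID attribute as follows:
 *   IB_GID_TYPE_ROCE                       -> GID_TYPE_FLAG_ROCE_V1
 *   IB_GID_TYPE_ROCE_UDP_ENCAP, v4-mapped  -> GID_TYPE_FLAG_ROCE_V2_IPV4
 *   IB_GID_TYPE_ROCE_UDP_ENCAP, otherwise  -> GID_TYPE_FLAG_ROCE_V2_IPV6
 */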
1706 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
1707                                int gid_index, const union ib_gid *gid,
1708                                const struct ib_gid_attr *attr)
1709 {
1710         enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
1711         int ret;
1712
1713         if (!gid || !attr)
1714                 return -EINVAL;
1715
1716         if (attr->gid_type == IB_GID_TYPE_ROCE)
1717                 sgid_type = GID_TYPE_FLAG_ROCE_V1;
1718
1719         if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
1720                 if (ipv6_addr_v4mapped((void *)gid))
1721                         sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
1722                 else
1723                         sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
1724         }
1725
1726         ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
1727         if (ret)
1728                 dev_err(hr_dev->dev, "Configure sgid table failed (%d)!\n", ret);
1729
1730         return ret;
1731 }
1732
1733 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
1734                                u8 *addr)
1735 {
1736         struct hns_roce_cmq_desc desc;
1737         struct hns_roce_cfg_smac_tb *smac_tb =
1738                                     (struct hns_roce_cfg_smac_tb *)desc.data;
1739         u16 reg_smac_h;
1740         u32 reg_smac_l;
1741
1742         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
1743
1744         reg_smac_l = *(u32 *)(&addr[0]);
1745         reg_smac_h = *(u16 *)(&addr[4]);
1746
1747         memset(smac_tb, 0, sizeof(*smac_tb));
1748         roce_set_field(smac_tb->tb_idx_rsv,
1749                        CFG_SMAC_TB_IDX_M,
1750                        CFG_SMAC_TB_IDX_S, phy_port);
1751         roce_set_field(smac_tb->vf_smac_h_rsv,
1752                        CFG_SMAC_TB_VF_SMAC_H_M,
1753                        CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
1754         smac_tb->vf_smac_l = reg_smac_l;
1755
1756         return hns_roce_cmq_send(hr_dev, &desc, 1);
1757 }
1758
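/*
 * Fill the PBL fields of an MPT entry from the MR's umem.  The PBL base
 * address is stored right-shifted by 3 and the page addresses by 6,
 * which presumably reflects the hardware's minimum alignment.  Only the
 * first two page addresses (PA0/PA1) are recorded inline in the MPT;
 * the remaining pages are reached through the PBL itself.
 */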
1759 static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
1760                         struct hns_roce_mr *mr)
1761 {
1762         struct scatterlist *sg;
1763         u64 page_addr;
1764         u64 *pages;
1765         int i, j;
1766         int len;
1767         int entry;
1768
1769         mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
1770         mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1771         roce_set_field(mpt_entry->byte_48_mode_ba,
1772                        V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
1773                        upper_32_bits(mr->pbl_ba >> 3));
1774
        /* Use a zeroed page so pages[1] reads as 0 for single-page MRs */
1775         pages = (u64 *)get_zeroed_page(GFP_KERNEL);
1776         if (!pages)
1777                 return -ENOMEM;
1778
1779         i = 0;
1780         for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
1781                 len = sg_dma_len(sg) >> PAGE_SHIFT;
1782                 for (j = 0; j < len; ++j) {
1783                         page_addr = sg_dma_address(sg) +
1784                                 (j << mr->umem->page_shift);
1785                         pages[i] = page_addr >> 6;
1786                         /* Record the first two entries directly in the MTPT */
1787                         if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
1788                                 goto found;
1789                         i++;
1790                 }
1791         }
1792 found:
1793         mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
1794         roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
1795                        V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
1796
1797         mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
1798         roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
1799                        V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
1800         roce_set_field(mpt_entry->byte_64_buf_pa1,
1801                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
1802                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
1803                        mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
1804
1805         free_page((unsigned long)pages);
1806
1807         return 0;
1808 }
1809
1810 static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1811                                   unsigned long mtpt_idx)
1812 {
1813         struct hns_roce_v2_mpt_entry *mpt_entry;
1814         int ret;
1815
1816         mpt_entry = mb_buf;
1817         memset(mpt_entry, 0, sizeof(*mpt_entry));
1818
1819         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
1820                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
1821         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
1822                        V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
1823                        HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
1824         roce_set_field(mpt_entry->byte_4_pd_hop_st,
1825                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
1826                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
1827                        mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
1828         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1829                        V2_MPT_BYTE_4_PD_S, mr->pd);
1830
1831         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
1832         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
1833         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
1834         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
1835                      (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
1836         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
1837                      mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
1838         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
1839                      (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
1840         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
1841                      (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1842         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1843                      (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1844
1845         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
1846                      mr->type == MR_TYPE_MR ? 0 : 1);
1847         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
1848                      1);
1849
1850         mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
1851         mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
1852         mpt_entry->lkey = cpu_to_le32(mr->key);
1853         mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
1854         mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
1855
1856         if (mr->type == MR_TYPE_DMA)
1857                 return 0;
1858
1859         ret = set_mtpt_pbl(mpt_entry, mr);
1860
1861         return ret;
1862 }
1863
1864 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
1865                                         struct hns_roce_mr *mr, int flags,
1866                                         u32 pdn, int mr_access_flags, u64 iova,
1867                                         u64 size, void *mb_buf)
1868 {
1869         struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
1870         int ret = 0;
1871
1872         if (flags & IB_MR_REREG_PD) {
1873                 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1874                                V2_MPT_BYTE_4_PD_S, pdn);
1875                 mr->pd = pdn;
1876         }
1877
1878         if (flags & IB_MR_REREG_ACCESS) {
1879                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
1880                              V2_MPT_BYTE_8_BIND_EN_S,
1881                              (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
1882                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
1883                              V2_MPT_BYTE_8_ATOMIC_EN_S,
1884                              mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
1885                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
1886                              mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
1887                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
1888                              mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
1889                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1890                              mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
1891         }
1892
1893         if (flags & IB_MR_REREG_TRANS) {
1894                 mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
1895                 mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
1896                 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
1897                 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
1898
1899                 mr->iova = iova;
1900                 mr->size = size;
1901
1902                 ret = set_mtpt_pbl(mpt_entry, mr);
1903         }
1904
1905         return ret;
1906 }
1907
1908 static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
1909 {
1910         struct hns_roce_v2_mpt_entry *mpt_entry;
1911
1912         mpt_entry = mb_buf;
1913         memset(mpt_entry, 0, sizeof(*mpt_entry));
1914
1915         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
1916                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
1917         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
1918                        V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
1919         roce_set_field(mpt_entry->byte_4_pd_hop_st,
1920                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
1921                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
1922                        mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
1923         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1924                        V2_MPT_BYTE_4_PD_S, mr->pd);
1925
1926         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
1927         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
1928         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
1929
1930         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
1931         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
1932         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
1933         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
1934
1935         mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
1936
1937         mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1938         roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
1939                        V2_MPT_BYTE_48_PBL_BA_H_S,
1940                        upper_32_bits(mr->pbl_ba >> 3));
1941
1942         roce_set_field(mpt_entry->byte_64_buf_pa1,
1943                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
1944                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
1945                        mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
1946
1947         return 0;
1948 }
1949
1950 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
1951 {
1952         struct hns_roce_v2_mpt_entry *mpt_entry;
1953
1954         mpt_entry = mb_buf;
1955         memset(mpt_entry, 0, sizeof(*mpt_entry));
1956
1957         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
1958                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
1959         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1960                        V2_MPT_BYTE_4_PD_S, mw->pdn);
1961         roce_set_field(mpt_entry->byte_4_pd_hop_st,
1962                        V2_MPT_BYTE_4_PBL_HOP_NUM_M,
1963                        V2_MPT_BYTE_4_PBL_HOP_NUM_S,
1964                        mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
1965                        0 : mw->pbl_hop_num);
1966         roce_set_field(mpt_entry->byte_4_pd_hop_st,
1967                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
1968                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
1969                        mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
1970
1971         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
1972         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
1973
1974         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
1975         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
1976         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
1977         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
1978                      mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
1979
1980         roce_set_field(mpt_entry->byte_64_buf_pa1,
1981                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
1982                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
1983                        mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
1984
1985         mpt_entry->lkey = cpu_to_le32(mw->rkey);
1986
1987         return 0;
1988 }
1989
1990 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
1991 {
1992         return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
1993                                    n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
1994 }
1995
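/*
 * A CQE belongs to software when its owner bit differs from the wrap bit
 * of the consumer index.  With a CQ of depth N (ib_cq.cqe == N - 1),
 * n & N extracts that wrap bit: e.g. for N == 256, indices 0..255 expect
 * one owner value and indices 256..511 the opposite one, flipping again
 * on every wrap of the ring.
 */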
1996 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
1997 {
1998         struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
1999
2000         /* The CQE is valid for software when its owner bit differs from the cons_idx wrap bit */
2001         return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
2002                 !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
2003 }
2004
2005 static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
2006 {
2007         return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
2008 }
2009
2010 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
2011 {
2012         *hr_cq->set_ci_db = cons_index & 0xffffff;
2013 }
2014
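/*
 * Remove all CQEs of the given QP from a CQ the QP is detaching from.
 * First scan forward from cons_index to find the end of the valid CQEs,
 * then walk backwards: CQEs matching the QPN are logically dropped by
 * bumping nfreed, and every surviving CQE is copied forward by nfreed
 * slots, keeping the owner bit of the destination slot so the ownership
 * pattern of the ring is preserved.  Finally cons_index is advanced past
 * the freed entries.
 */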
2015 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2016                                    struct hns_roce_srq *srq)
2017 {
2018         struct hns_roce_v2_cqe *cqe, *dest;
2019         u32 prod_index;
2020         int nfreed = 0;
2021         u8 owner_bit;
2022
2023         for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
2024              ++prod_index) {
2025                 if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
2026                         break;
2027         }
2028
2029         /*
2030          * Now backwards through the CQ, removing CQ entries
2031          * that match our QP by overwriting them with next entries.
2032          */
2033         while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2034                 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2035                 if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2036                                     V2_CQE_BYTE_16_LCL_QPN_S) &
2037                                     HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
2038                         /* SRQ is not handled here; just count the CQE for removal */
2039                         ++nfreed;
2040                 } else if (nfreed) {
2041                         dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
2042                                           hr_cq->ib_cq.cqe);
2043                         owner_bit = roce_get_bit(dest->byte_4,
2044                                                  V2_CQE_BYTE_4_OWNER_S);
2045                         memcpy(dest, cqe, sizeof(*cqe));
2046                         roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
2047                                      owner_bit);
2048                 }
2049         }
2050
2051         if (nfreed) {
2052                 hr_cq->cons_index += nfreed;
2053                 /*
2054                  * Make sure update of buffer contents is done before
2055                  * updating consumer index.
2056                  */
2057                 wmb();
2058                 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2059         }
2060 }
2061
2062 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2063                                  struct hns_roce_srq *srq)
2064 {
2065         spin_lock_irq(&hr_cq->lock);
2066         __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
2067         spin_unlock_irq(&hr_cq->lock);
2068 }
2069
2070 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
2071                                   struct hns_roce_cq *hr_cq, void *mb_buf,
2072                                   u64 *mtts, dma_addr_t dma_handle, int nent,
2073                                   u32 vector)
2074 {
2075         struct hns_roce_v2_cq_context *cq_context;
2076
2077         cq_context = mb_buf;
2078         memset(cq_context, 0, sizeof(*cq_context));
2079
2080         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
2081                        V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
2082         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
2083                        V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
2084         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
2085                        V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
2086         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
2087                        V2_CQC_BYTE_4_CEQN_S, vector);
2088         cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);
2089
2090         roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
2091                        V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
2092
2093         cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
2094         cq_context->cqe_cur_blk_addr =
2095                                 cpu_to_le32(cq_context->cqe_cur_blk_addr);
2096
2097         roce_set_field(cq_context->byte_16_hop_addr,
2098                        V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
2099                        V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
2100                        cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
2101         roce_set_field(cq_context->byte_16_hop_addr,
2102                        V2_CQC_BYTE_16_CQE_HOP_NUM_M,
2103                        V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
2104                        HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
2105
2106         cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
2107         roce_set_field(cq_context->byte_24_pgsz_addr,
2108                        V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
2109                        V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
2110                        cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
2111         roce_set_field(cq_context->byte_24_pgsz_addr,
2112                        V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
2113                        V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
2114                        hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
2115         roce_set_field(cq_context->byte_24_pgsz_addr,
2116                        V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
2117                        V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
2118                        hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
2119
2120         cq_context->cqe_ba = (u32)(dma_handle >> 3);
2121
2122         roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
2123                        V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
2124
2125         if (hr_cq->db_en)
2126                 roce_set_bit(cq_context->byte_44_db_record,
2127                              V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
2128
2129         roce_set_field(cq_context->byte_44_db_record,
2130                        V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
2131                        V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
2132                        ((u32)hr_cq->db.dma) >> 1);
2133         cq_context->db_record_addr = hr_cq->db.dma >> 32;
2134
2135         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2136                        V2_CQC_BYTE_56_CQ_MAX_CNT_M,
2137                        V2_CQC_BYTE_56_CQ_MAX_CNT_S,
2138                        HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
2139         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2140                        V2_CQC_BYTE_56_CQ_PERIOD_M,
2141                        V2_CQC_BYTE_56_CQ_PERIOD_S,
2142                        HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
2143 }
2144
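/*
 * Arm the CQ by ringing its doorbell: doorbell[0] carries the CQN and
 * the notify command, doorbell[1] the consumer index (masked to twice
 * the CQ depth), the arm sequence number and the solicited-only flag.
 */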
2145 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
2146                                      enum ib_cq_notify_flags flags)
2147 {
2148         struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2149         u32 notification_flag;
2150         u32 doorbell[2];
2151
2152         doorbell[0] = 0;
2153         doorbell[1] = 0;
2154
2155         notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
2156                              V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
2157         /*
2158          * flags = 0: notification flag = 1, notify for the next CQE
2159          * flags = 1: notification flag = 0, notify only for solicited CQEs
2160          */
2161         roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
2162                        hr_cq->cqn);
2163         roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
2164                        HNS_ROCE_V2_CQ_DB_NTR);
2165         roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
2166                        V2_CQ_DB_PARAMETER_CONS_IDX_S,
2167                        hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2168         roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
2169                        V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
2170         roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
2171                      notification_flag);
2172
2173         hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
2174
2175         return 0;
2176 }
2177
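/*
 * For an RQ inline receive the payload is delivered in the receive WQE
 * buffer rather than in the posted SGEs, so copy it out into the user's
 * scatter list that was recorded in rq_inl_buf at post_recv time.  If
 * the payload does not fit the posted SGEs, report a local length error.
 */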
2178 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
2179                                                     struct hns_roce_qp **cur_qp,
2180                                                     struct ib_wc *wc)
2181 {
2182         struct hns_roce_rinl_sge *sge_list;
2183         u32 wr_num, wr_cnt, sge_num;
2184         u32 sge_cnt, data_len, size;
2185         void *wqe_buf;
2186
2187         wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
2188                                 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
2189         wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
2190
2191         sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
2192         sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
2193         wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
2194         data_len = wc->byte_len;
2195
2196         for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
2197                 size = min(sge_list[sge_cnt].len, data_len);
2198                 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
2199
2200                 data_len -= size;
2201                 wqe_buf += size;
2202         }
2203
2204         if (data_len) {
2205                 wc->status = IB_WC_LOC_LEN_ERR;
2206                 return -EAGAIN;
2207         }
2208
2209         return 0;
2210 }
2211
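/*
 * Poll one CQE: locate the owning QP (cached in *cur_qp across calls),
 * translate the hardware status and opcode fields into ib_wc values and
 * advance the SQ or RQ tail.  A CQE with a non-flush error additionally
 * moves the QP to the error state so its remaining WQEs get flushed.
 */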
2212 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
2213                                 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2214 {
2215         struct hns_roce_dev *hr_dev;
2216         struct hns_roce_v2_cqe *cqe;
2217         struct hns_roce_qp *hr_qp;
2218         struct hns_roce_wq *wq;
2219         struct ib_qp_attr attr;
2220         int attr_mask;
2221         int is_send;
2222         u16 wqe_ctr;
2223         u32 opcode;
2224         u32 status;
2225         int qpn;
2226         int ret;
2227
2228         /* Find cqe according to consumer index */
2229         cqe = next_cqe_sw_v2(hr_cq);
2230         if (!cqe)
2231                 return -EAGAIN;
2232
2233         ++hr_cq->cons_index;
2234         /* Read the CQE payload only after the ownership check above */
2235         rmb();
2236
2237         /* 0->SQ, 1->RQ */
2238         is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
2239
2240         qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2241                                 V2_CQE_BYTE_16_LCL_QPN_S);
2242
2243         if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2244                 hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2245                 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2246                 if (unlikely(!hr_qp)) {
2247                         dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
2248                                 hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
2249                         return -EINVAL;
2250                 }
2251                 *cur_qp = hr_qp;
2252         }
2253
2254         wc->qp = &(*cur_qp)->ibqp;
2255         wc->vendor_err = 0;
2256
2257         status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
2258                                 V2_CQE_BYTE_4_STATUS_S);
2259         switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
2260         case HNS_ROCE_CQE_V2_SUCCESS:
2261                 wc->status = IB_WC_SUCCESS;
2262                 break;
2263         case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
2264                 wc->status = IB_WC_LOC_LEN_ERR;
2265                 break;
2266         case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
2267                 wc->status = IB_WC_LOC_QP_OP_ERR;
2268                 break;
2269         case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
2270                 wc->status = IB_WC_LOC_PROT_ERR;
2271                 break;
2272         case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
2273                 wc->status = IB_WC_WR_FLUSH_ERR;
2274                 break;
2275         case HNS_ROCE_CQE_V2_MW_BIND_ERR:
2276                 wc->status = IB_WC_MW_BIND_ERR;
2277                 break;
2278         case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
2279                 wc->status = IB_WC_BAD_RESP_ERR;
2280                 break;
2281         case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
2282                 wc->status = IB_WC_LOC_ACCESS_ERR;
2283                 break;
2284         case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
2285                 wc->status = IB_WC_REM_INV_REQ_ERR;
2286                 break;
2287         case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
2288                 wc->status = IB_WC_REM_ACCESS_ERR;
2289                 break;
2290         case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
2291                 wc->status = IB_WC_REM_OP_ERR;
2292                 break;
2293         case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
2294                 wc->status = IB_WC_RETRY_EXC_ERR;
2295                 break;
2296         case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
2297                 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2298                 break;
2299         case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
2300                 wc->status = IB_WC_REM_ABORT_ERR;
2301                 break;
2302         default:
2303                 wc->status = IB_WC_GENERAL_ERR;
2304                 break;
2305         }
2306
2307         /* flush cqe if wc status is error, excluding flush error */
2308         if ((wc->status != IB_WC_SUCCESS) &&
2309             (wc->status != IB_WC_WR_FLUSH_ERR)) {
2310                 attr_mask = IB_QP_STATE;
2311                 attr.qp_state = IB_QPS_ERR;
2312                 return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
2313                                              &attr, attr_mask,
2314                                              (*cur_qp)->state, IB_QPS_ERR);
2315         }
2316
2317         if (wc->status == IB_WC_WR_FLUSH_ERR)
2318                 return 0;
2319
2320         if (is_send) {
2321                 wc->wc_flags = 0;
2322                 /* SQ completion: map the WQE opcode carried in the CQE */
2323                 switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2324                                        V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
2325                 case HNS_ROCE_SQ_OPCODE_SEND:
2326                         wc->opcode = IB_WC_SEND;
2327                         break;
2328                 case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
2329                         wc->opcode = IB_WC_SEND;
2330                         break;
2331                 case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
2332                         wc->opcode = IB_WC_SEND;
2333                         wc->wc_flags |= IB_WC_WITH_IMM;
2334                         break;
2335                 case HNS_ROCE_SQ_OPCODE_RDMA_READ:
2336                         wc->opcode = IB_WC_RDMA_READ;
2337                         wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2338                         break;
2339                 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
2340                         wc->opcode = IB_WC_RDMA_WRITE;
2341                         break;
2342                 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
2343                         wc->opcode = IB_WC_RDMA_WRITE;
2344                         wc->wc_flags |= IB_WC_WITH_IMM;
2345                         break;
2346                 case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
2347                         wc->opcode = IB_WC_LOCAL_INV;
2348                         wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2349                         break;
2350                 case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
2351                         wc->opcode = IB_WC_COMP_SWAP;
2352                         wc->byte_len  = 8;
2353                         break;
2354                 case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
2355                         wc->opcode = IB_WC_FETCH_ADD;
2356                         wc->byte_len  = 8;
2357                         break;
2358                 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
2359                         wc->opcode = IB_WC_MASKED_COMP_SWAP;
2360                         wc->byte_len  = 8;
2361                         break;
2362                 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
2363                         wc->opcode = IB_WC_MASKED_FETCH_ADD;
2364                         wc->byte_len  = 8;
2365                         break;
2366                 case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
2367                         wc->opcode = IB_WC_REG_MR;
2368                         break;
2369                 case HNS_ROCE_SQ_OPCODE_BIND_MW:
2370                         wc->opcode = IB_WC_REG_MR;
2371                         break;
2372                 default:
2373                         wc->status = IB_WC_GENERAL_ERR;
2374                         break;
2375                 }
2376
2377                 wq = &(*cur_qp)->sq;
2378                 if ((*cur_qp)->sq_signal_bits) {
2379                         /*
2380                          * If sq_signal_bits is set, first update the
2381                          * tail pointer to the WQE that this CQE
2382                          * corresponds to.
2383                          */
2384                         wqe_ctr = (u16)roce_get_field(cqe->byte_4,
2385                                                       V2_CQE_BYTE_4_WQE_INDX_M,
2386                                                       V2_CQE_BYTE_4_WQE_INDX_S);
2387                         wq->tail += (wqe_ctr - (u16)wq->tail) &
2388                                     (wq->wqe_cnt - 1);
2389                 }
2390
2391                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2392                 ++wq->tail;
2393         } else {
2394                 /* RQ corresponding to the CQE */
2395                 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2396
2397                 opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2398                                         V2_CQE_BYTE_4_OPCODE_S);
2399                 switch (opcode & 0x1f) {
2400                 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
2401                         wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2402                         wc->wc_flags = IB_WC_WITH_IMM;
2403                         wc->ex.imm_data =
2404                                 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2405                         break;
2406                 case HNS_ROCE_V2_OPCODE_SEND:
2407                         wc->opcode = IB_WC_RECV;
2408                         wc->wc_flags = 0;
2409                         break;
2410                 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
2411                         wc->opcode = IB_WC_RECV;
2412                         wc->wc_flags = IB_WC_WITH_IMM;
2413                         wc->ex.imm_data =
2414                                 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2415                         break;
2416                 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
2417                         wc->opcode = IB_WC_RECV;
2418                         wc->wc_flags = IB_WC_WITH_INVALIDATE;
2419                         wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
2420                         break;
2421                 default:
2422                         wc->status = IB_WC_GENERAL_ERR;
2423                         break;
2424                 }
2425
2426                 if ((wc->qp->qp_type == IB_QPT_RC ||
2427                      wc->qp->qp_type == IB_QPT_UC) &&
2428                     (opcode == HNS_ROCE_V2_OPCODE_SEND ||
2429                     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
2430                     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
2431                     (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
2432                         ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
2433                         if (ret)
2434                                 return -EAGAIN;
2435                 }
2436
2437                 /* Update tail pointer, record wr_id */
2438                 wq = &(*cur_qp)->rq;
2439                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2440                 ++wq->tail;
2441
2442                 wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
2443                                             V2_CQE_BYTE_32_SL_S);
2444                 wc->src_qp = (u8)roce_get_field(cqe->byte_32,
2445                                                 V2_CQE_BYTE_32_RMT_QPN_M,
2446                                                 V2_CQE_BYTE_32_RMT_QPN_S);
2447                 wc->slid = 0;
2448                 wc->wc_flags |= (roce_get_bit(cqe->byte_32,
2449                                               V2_CQE_BYTE_32_GRH_S) ?
2450                                               IB_WC_GRH : 0);
2451                 wc->port_num = roce_get_field(cqe->byte_32,
2452                                 V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
2453                 wc->pkey_index = 0;
2454                 memcpy(wc->smac, cqe->smac, 4);
2455                 wc->smac[4] = roce_get_field(cqe->byte_28,
2456                                              V2_CQE_BYTE_28_SMAC_4_M,
2457                                              V2_CQE_BYTE_28_SMAC_4_S);
2458                 wc->smac[5] = roce_get_field(cqe->byte_28,
2459                                              V2_CQE_BYTE_28_SMAC_5_M,
2460                                              V2_CQE_BYTE_28_SMAC_5_S);
2461                 if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
2462                         wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
2463                                                           V2_CQE_BYTE_28_VID_M,
2464                                                           V2_CQE_BYTE_28_VID_S);
2465                 } else {
2466                         wc->vlan_id = 0xffff;
2467                 }
2468
2469                 wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
2470                 wc->network_hdr_type = roce_get_field(cqe->byte_28,
2471                                                     V2_CQE_BYTE_28_PORT_TYPE_M,
2472                                                     V2_CQE_BYTE_28_PORT_TYPE_S);
2473         }
2474
2475         return 0;
2476 }
2477
2478 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
2479                                struct ib_wc *wc)
2480 {
2481         struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2482         struct hns_roce_qp *cur_qp = NULL;
2483         unsigned long flags;
2484         int npolled;
2485
2486         spin_lock_irqsave(&hr_cq->lock, flags);
2487
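             /*
              * hns_roce_v2_poll_one() fills one work completion per call and
              * returns nonzero once no software-owned CQE remains; the
              * consumer index advances as CQEs are consumed.
              */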
2488         for (npolled = 0; npolled < num_entries; ++npolled) {
2489                 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
2490                         break;
2491         }
2492
2493         if (npolled) {
2494                 /* Order CQE processing before the consumer-index doorbell write */
2495                 wmb();
2496                 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2497         }
2498
2499         spin_unlock_irqrestore(&hr_cq->lock, flags);
2500
2501         return npolled;
2502 }
2503
2504 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
2505                                struct hns_roce_hem_table *table, int obj,
2506                                int step_idx)
2507 {
2508         struct device *dev = hr_dev->dev;
2509         struct hns_roce_cmd_mailbox *mailbox;
2510         struct hns_roce_hem_iter iter;
2511         struct hns_roce_hem_mhop mhop;
2512         struct hns_roce_hem *hem;
2513         unsigned long mhop_obj = obj;
2514         int i, j, k;
2515         int ret = 0;
2516         u64 hem_idx = 0;
2517         u64 l1_idx = 0;
2518         u64 bt_ba = 0;
2519         u32 chunk_ba_num;
2520         u32 hop_num;
2521         u16 op = 0xff;
2522
2523         if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2524                 return 0;
2525
2526         hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
2527         i = mhop.l0_idx;
2528         j = mhop.l1_idx;
2529         k = mhop.l2_idx;
2530         hop_num = mhop.hop_num;
2531         chunk_ba_num = mhop.bt_chunk_size / 8;
2532
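             /*
              * Each base-address entry occupies 8 bytes, so one BT chunk holds
              * bt_chunk_size / 8 entries. The per-hop indices (i, j, k) are
              * flattened below into a linear index of the target HEM chunk,
              * e.g. for hop_num == 2: hem_idx = (i * ba_num + j) * ba_num + k.
              */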
2533         if (hop_num == 2) {
2534                 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
2535                           k;
2536                 l1_idx = i * chunk_ba_num + j;
2537         } else if (hop_num == 1) {
2538                 hem_idx = i * chunk_ba_num + j;
2539         } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
2540                 hem_idx = i;
2541         }
2542
2543         switch (table->type) {
2544         case HEM_TYPE_QPC:
2545                 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
2546                 break;
2547         case HEM_TYPE_MTPT:
2548                 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
2549                 break;
2550         case HEM_TYPE_CQC:
2551                 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
2552                 break;
2553         case HEM_TYPE_SRQC:
2554                 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
2555                 break;
2556         default:
2557                 dev_warn(dev, "table type %d not written via mailbox!\n",
2558                          table->type);
2559                 return 0;
2560         }
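             /*
              * The *_BT0/BT1/BT2 mailbox opcodes are laid out consecutively,
              * which "op += step_idx" relies on to select the hop level.
              */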
2561         op += step_idx;
2562
2563         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2564         if (IS_ERR(mailbox))
2565                 return PTR_ERR(mailbox);
2566
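             /*
              * At the last hop level the HEM pages themselves are written,
              * one mailbox command per chunk; at intermediate levels only
              * the BT base address of the next level is programmed.
              */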
2567         if (check_whether_last_step(hop_num, step_idx)) {
2568                 hem = table->hem[hem_idx];
2569                 for (hns_roce_hem_first(hem, &iter);
2570                      !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
2571                         bt_ba = hns_roce_hem_addr(&iter);
2572
2573                         /* configure the ba, tag, and op */
2574                         ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
2575                                                 obj, 0, op,
2576                                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
2577                 }
2578         } else {
2579                 if (step_idx == 0)
2580                         bt_ba = table->bt_l0_dma_addr[i];
2581                 else if (step_idx == 1 && hop_num == 2)
2582                         bt_ba = table->bt_l1_dma_addr[l1_idx];
2583
2584                 /* configure the ba, tag, and op */
2585                 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
2586                                         0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
2587         }
2588
2589         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2590         return ret;
2591 }
2592
2593 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
2594                                  struct hns_roce_hem_table *table, int obj,
2595                                  int step_idx)
2596 {
2597         struct device *dev = hr_dev->dev;
2598         struct hns_roce_cmd_mailbox *mailbox;
2599         int ret = 0;
2600         u16 op = 0xff;
2601
2602         if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2603                 return 0;
2604
2605         switch (table->type) {
2606         case HEM_TYPE_QPC:
2607                 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
2608                 break;
2609         case HEM_TYPE_MTPT:
2610                 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
2611                 break;
2612         case HEM_TYPE_CQC:
2613                 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
2614                 break;
2615         case HEM_TYPE_SRQC:
2616                 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
2617                 break;
2618         default:
2619                 dev_warn(dev, "table type %d not destroyed via mailbox!\n",
2620                          table->type);
2621                 return 0;
2622         }
2623         op += step_idx;
2624
2625         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2626         if (IS_ERR(mailbox))
2627                 return PTR_ERR(mailbox);
2628
2629         /* configure the tag and op */
2630         ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
2631                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
2632
2633         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2634         return ret;
2635 }
2636
2637 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
2638                                  struct hns_roce_mtt *mtt,
2639                                  enum ib_qp_state cur_state,
2640                                  enum ib_qp_state new_state,
2641                                  struct hns_roce_v2_qp_context *context,
2642                                  struct hns_roce_qp *hr_qp)
2643 {
2644         struct hns_roce_cmd_mailbox *mailbox;
2645         int ret;
2646
2647         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2648         if (IS_ERR(mailbox))
2649                 return PTR_ERR(mailbox);
2650
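             /*
              * The caller allocates the context and its mask contiguously, so
              * a single memcpy transfers both halves to the mailbox buffer.
              */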
2651         memcpy(mailbox->buf, context, sizeof(*context) * 2);
2652
2653         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2654                                 HNS_ROCE_CMD_MODIFY_QPC,
2655                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
2656
2657         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2658
2659         return ret;
2660 }
2661
2662 static void set_access_flags(struct hns_roce_qp *hr_qp,
2663                              struct hns_roce_v2_qp_context *context,
2664                              struct hns_roce_v2_qp_context *qpc_mask,
2665                              const struct ib_qp_attr *attr, int attr_mask)
2666 {
2667         u8 dest_rd_atomic;
2668         u32 access_flags;
2669
2670         dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
2671                          attr->max_dest_rd_atomic : hr_qp->resp_depth;
2672
2673         access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
2674                        attr->qp_access_flags : hr_qp->atomic_rd_en;
2675
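             /*
              * A responder depth of zero means the QP cannot service inbound
              * RDMA reads or atomics, so every access flag except remote
              * write is stripped.
              */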
2676         if (!dest_rd_atomic)
2677                 access_flags &= IB_ACCESS_REMOTE_WRITE;
2678
2679         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2680                      !!(access_flags & IB_ACCESS_REMOTE_READ));
2681         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
2682
2683         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2684                      !!(access_flags & IB_ACCESS_REMOTE_WRITE));
2685         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
2686
2687         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2688                      !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
2689         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
2690 }
2691
2692 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
2693                                     const struct ib_qp_attr *attr,
2694                                     int attr_mask,
2695                                     struct hns_roce_v2_qp_context *context,
2696                                     struct hns_roce_v2_qp_context *qpc_mask)
2697 {
2698         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2699         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2700
2701         /*
2702          * In the v2 engine, software passes a context and a context mask
2703          * to hardware when modifying a QP. For every field software wants
2704          * to modify, all bits of that field in the context mask must be
2705          * cleared to 0; the mask bits of untouched fields stay at 0x1.
2706          */
2707         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2708                        V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
2709         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2710                        V2_QPC_BYTE_4_TST_S, 0);
2711
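             /*
              * GSI QPs always use the extended SGE area; other QPs use it
              * only when a WQE can carry more than two SGEs, hence the
              * max_gs > 2 check below.
              */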
2712         if (ibqp->qp_type == IB_QPT_GSI)
2713                 roce_set_field(context->byte_4_sqpn_tst,
2714                                V2_QPC_BYTE_4_SGE_SHIFT_M,
2715                                V2_QPC_BYTE_4_SGE_SHIFT_S,
2716                                ilog2((unsigned int)hr_qp->sge.sge_cnt));
2717         else
2718                 roce_set_field(context->byte_4_sqpn_tst,
2719                                V2_QPC_BYTE_4_SGE_SHIFT_M,
2720                                V2_QPC_BYTE_4_SGE_SHIFT_S,
2721                                hr_qp->sq.max_gs > 2 ?
2722                                ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
2723
2724         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
2725                        V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
2726
2727         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2728                        V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
2729         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2730                        V2_QPC_BYTE_4_SQPN_S, 0);
2731
2732         roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2733                        V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
2734         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2735                        V2_QPC_BYTE_16_PD_S, 0);
2736
2737         roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
2738                        V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
2739         roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
2740                        V2_QPC_BYTE_20_RQWS_S, 0);
2741
2742         roce_set_field(context->byte_20_smac_sgid_idx,
2743                        V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
2744                        ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2745         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2746                        V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
2747
2748         roce_set_field(context->byte_20_smac_sgid_idx,
2749                        V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
2750                        ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2751         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2752                        V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
2753
2754         /* When no VLAN is present, the VLAN ID must be set to 0xfff */
2755         roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
2756                        V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
2757         roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
2758                        V2_QPC_BYTE_24_VLAN_ID_S, 0);
2759
2760         /*
2761          * Clear some fields in the context. Because every context field
2762          * defaults to zero, the fields need not be written again; only
2763          * the corresponding context-mask bits must be cleared to 0.
2764          */
2765         roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
2766         roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
2767         roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
2768         roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
2769
2770         roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
2771                        V2_QPC_BYTE_60_TEMPID_S, 0);
2772
2773         roce_set_field(qpc_mask->byte_60_qpst_tempid,
2774                        V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
2775                        0);
2776         roce_set_bit(qpc_mask->byte_60_qpst_tempid,
2777                      V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
2778         roce_set_bit(qpc_mask->byte_60_qpst_tempid,
2779                      V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
2780         roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
2781         roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
2782
2783         if (attr_mask & IB_QP_QKEY) {
2784                 context->qkey_xrcd = attr->qkey;
2785                 qpc_mask->qkey_xrcd = 0;
2786                 hr_qp->qkey = attr->qkey;
2787         }
2788
2789         if (hr_qp->rdb_en) {
2790                 roce_set_bit(context->byte_68_rq_db,
2791                              V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
2792                 roce_set_bit(qpc_mask->byte_68_rq_db,
2793                              V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
2794         }
2795
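             /*
              * The 64-bit record-doorbell DMA address is split: bits [31:1]
              * of the low word go into byte_68, the high word into
              * rq_db_record_addr.
              */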
2796         roce_set_field(context->byte_68_rq_db,
2797                        V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
2798                        V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
2799                        ((u32)hr_qp->rdb.dma) >> 1);
2800         roce_set_field(qpc_mask->byte_68_rq_db,
2801                        V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
2802                        V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
2803         context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
2804         qpc_mask->rq_db_record_addr = 0;
2805
2806         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
2807                     (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
2808         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
2809
2810         roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2811                        V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
2812         roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2813                        V2_QPC_BYTE_80_RX_CQN_S, 0);
2814         if (ibqp->srq) {
2815                 roce_set_field(context->byte_76_srqn_op_en,
2816                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
2817                                to_hr_srq(ibqp->srq)->srqn);
2818                 roce_set_field(qpc_mask->byte_76_srqn_op_en,
2819                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
2820                 roce_set_bit(context->byte_76_srqn_op_en,
2821                              V2_QPC_BYTE_76_SRQ_EN_S, 1);
2822                 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
2823                              V2_QPC_BYTE_76_SRQ_EN_S, 0);
2824         }
2825
2826         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2827                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
2828                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
2829         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2830                        V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
2831                        V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
2832
2833         roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
2834                        V2_QPC_BYTE_92_SRQ_INFO_S, 0);
2835
2836         roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
2837                        V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
2838
2839         roce_set_field(qpc_mask->byte_104_rq_sge,
2840                        V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
2841                        V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
2842
2843         roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
2844                      V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
2845         roce_set_field(qpc_mask->byte_108_rx_reqepsn,
2846                        V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
2847                        V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
2848         roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
2849                      V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
2850
2851         qpc_mask->rq_rnr_timer = 0;
2852         qpc_mask->rx_msg_len = 0;
2853         qpc_mask->rx_rkey_pkt_info = 0;
2854         qpc_mask->rx_va = 0;
2855
2856         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
2857                        V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
2858         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
2859                        V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
2860
2861         roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
2862                      0);
2863         roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
2864                        V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
2865         roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
2866                        V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
2867
2868         roce_set_field(qpc_mask->byte_144_raq,
2869                        V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
2870                        V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
2871         roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
2872                        V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
2873         roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
2874
2875         roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
2876                        V2_QPC_BYTE_148_RQ_MSN_S, 0);
2877         roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
2878                        V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
2879
2880         roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
2881                        V2_QPC_BYTE_152_RAQ_PSN_S, 0);
2882         roce_set_field(qpc_mask->byte_152_raq,
2883                        V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
2884                        V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
2885
2886         roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
2887                        V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
2888
2889         roce_set_field(qpc_mask->byte_160_sq_ci_pi,
2890                        V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
2891                        V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
2892         roce_set_field(qpc_mask->byte_160_sq_ci_pi,
2893                        V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
2894                        V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
2895
2896         roce_set_bit(qpc_mask->byte_168_irrl_idx,
2897                      V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
2898         roce_set_bit(qpc_mask->byte_168_irrl_idx,
2899                      V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
2900         roce_set_bit(qpc_mask->byte_168_irrl_idx,
2901                      V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
2902         roce_set_bit(qpc_mask->byte_168_irrl_idx,
2903                      V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
2904         roce_set_bit(qpc_mask->byte_168_irrl_idx,
2905                      V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
2906         roce_set_field(qpc_mask->byte_168_irrl_idx,
2907                        V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
2908                        V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
2909
2910         roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
2911                        V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
2912         roce_set_field(qpc_mask->byte_172_sq_psn,
2913                        V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
2914                        V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
2915
2916         roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
2917                      0);
2918
2919         roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
2920         roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
2921
2922         roce_set_field(qpc_mask->byte_176_msg_pktn,
2923                        V2_QPC_BYTE_176_MSG_USE_PKTN_M,
2924                        V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
2925         roce_set_field(qpc_mask->byte_176_msg_pktn,
2926                        V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
2927                        V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
2928
2929         roce_set_field(qpc_mask->byte_184_irrl_idx,
2930                        V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
2931                        V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
2932
2933         qpc_mask->cur_sge_offset = 0;
2934
2935         roce_set_field(qpc_mask->byte_192_ext_sge,
2936                        V2_QPC_BYTE_192_CUR_SGE_IDX_M,
2937                        V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
2938         roce_set_field(qpc_mask->byte_192_ext_sge,
2939                        V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
2940                        V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
2941
2942         roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
2943                        V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
2944
2945         roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
2946                        V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
2947         roce_set_field(qpc_mask->byte_200_sq_max,
2948                        V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
2949                        V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
2950
2951         roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
2952         roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
2953
2954         roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
2955                        V2_QPC_BYTE_212_CHECK_FLG_S, 0);
2956
2957         qpc_mask->sq_timer = 0;
2958
2959         roce_set_field(qpc_mask->byte_220_retry_psn_msn,
2960                        V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
2961                        V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
2962         roce_set_field(qpc_mask->byte_232_irrl_sge,
2963                        V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
2964                        V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
2965
2966         roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
2967                      0);
2968         roce_set_bit(qpc_mask->byte_232_irrl_sge,
2969                      V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
2970         roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
2971                      0);
2972
2973         qpc_mask->irrl_cur_sge_offset = 0;
2974
2975         roce_set_field(qpc_mask->byte_240_irrl_tail,
2976                        V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
2977                        V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
2978         roce_set_field(qpc_mask->byte_240_irrl_tail,
2979                        V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
2980                        V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
2981         roce_set_field(qpc_mask->byte_240_irrl_tail,
2982                        V2_QPC_BYTE_240_RX_ACK_MSN_M,
2983                        V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
2984
2985         roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
2986                        V2_QPC_BYTE_248_IRRL_PSN_S, 0);
2987         roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
2988                      0);
2989         roce_set_field(qpc_mask->byte_248_ack_psn,
2990                        V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
2991                        V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
2992         roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
2993                      0);
2994         roce_set_bit(qpc_mask->byte_248_ack_psn,
2995                      V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
2996         roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
2997                      0);
2998
2999         hr_qp->access_flags = attr->qp_access_flags;
3000         hr_qp->pkey_index = attr->pkey_index;
3001         roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3002                        V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3003         roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3004                        V2_QPC_BYTE_252_TX_CQN_S, 0);
3005
3006         roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
3007                        V2_QPC_BYTE_252_ERR_TYPE_S, 0);
3008
3009         roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3010                        V2_QPC_BYTE_256_RQ_CQE_IDX_M,
3011                        V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
3012         roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3013                        V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
3014                        V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
3015 }
3016
3017 static void modify_qp_init_to_init(struct ib_qp *ibqp,
3018                                    const struct ib_qp_attr *attr, int attr_mask,
3019                                    struct hns_roce_v2_qp_context *context,
3020                                    struct hns_roce_v2_qp_context *qpc_mask)
3021 {
3022         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3023
3024         /*
3025          * In the v2 engine, software passes a context and a context mask
3026          * to hardware when modifying a QP. For every field software wants
3027          * to modify, all bits of that field in the context mask must be
3028          * cleared to 0; the mask bits of untouched fields stay at 0x1.
3029          */
3030         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3031                        V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3032         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3033                        V2_QPC_BYTE_4_TST_S, 0);
3034
3035         if (ibqp->qp_type == IB_QPT_GSI)
3036                 roce_set_field(context->byte_4_sqpn_tst,
3037                                V2_QPC_BYTE_4_SGE_SHIFT_M,
3038                                V2_QPC_BYTE_4_SGE_SHIFT_S,
3039                                ilog2((unsigned int)hr_qp->sge.sge_cnt));
3040         else
3041                 roce_set_field(context->byte_4_sqpn_tst,
3042                                V2_QPC_BYTE_4_SGE_SHIFT_M,
3043                                V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
3044                                ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
3045
3046         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
3047                        V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
3048
3049         if (attr_mask & IB_QP_ACCESS_FLAGS) {
3050                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3051                              !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
3052                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3053                              0);
3054
3055                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3056                              !!(attr->qp_access_flags &
3057                              IB_ACCESS_REMOTE_WRITE));
3058                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3059                              0);
3060
3061                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3062                              !!(attr->qp_access_flags &
3063                              IB_ACCESS_REMOTE_ATOMIC));
3064                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3065                              0);
3066         } else {
3067                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3068                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
3069                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3070                              0);
3071
3072                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3073                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
3074                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3075                              0);
3076
3077                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3078                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3079                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3080                              0);
3081         }
3082
3083         roce_set_field(context->byte_20_smac_sgid_idx,
3084                        V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3085                        ilog2((unsigned int)hr_qp->sq.wqe_cnt));
3086         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3087                        V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
3088
3089         roce_set_field(context->byte_20_smac_sgid_idx,
3090                        V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3091                        ilog2((unsigned int)hr_qp->rq.wqe_cnt));
3092         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3093                        V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
3094
3095         roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3096                        V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3097         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3098                        V2_QPC_BYTE_16_PD_S, 0);
3099
3100         roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3101                        V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3102         roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3103                        V2_QPC_BYTE_80_RX_CQN_S, 0);
3104
3105         roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3106                        V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3107         roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3108                        V2_QPC_BYTE_252_TX_CQN_S, 0);
3109
3110         if (ibqp->srq) {
3111                 roce_set_bit(context->byte_76_srqn_op_en,
3112                              V2_QPC_BYTE_76_SRQ_EN_S, 1);
3113                 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3114                              V2_QPC_BYTE_76_SRQ_EN_S, 0);
3115                 roce_set_field(context->byte_76_srqn_op_en,
3116                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3117                                to_hr_srq(ibqp->srq)->srqn);
3118                 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3119                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3120         }
3121
3122         if (attr_mask & IB_QP_QKEY) {
3123                 context->qkey_xrcd = attr->qkey;
3124                 qpc_mask->qkey_xrcd = 0;
3125         }
3126
3127         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3128                        V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3129         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3130                        V2_QPC_BYTE_4_SQPN_S, 0);
3131
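             /*
              * Note: the local QPN is written into the DQPN field here; the
              * peer's dest_qp_num is not programmed until the INIT-to-RTR
              * transition.
              */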
3132         if (attr_mask & IB_QP_DEST_QPN) {
3133                 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3134                                V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
3135                 roce_set_field(qpc_mask->byte_56_dqpn_err,
3136                                V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3137         }
3138 }
3139
3140 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
3141                                  const struct ib_qp_attr *attr, int attr_mask,
3142                                  struct hns_roce_v2_qp_context *context,
3143                                  struct hns_roce_v2_qp_context *qpc_mask)
3144 {
3145         const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
3146         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3147         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3148         struct device *dev = hr_dev->dev;
3149         dma_addr_t dma_handle_3;
3150         dma_addr_t dma_handle_2;
3151         dma_addr_t dma_handle;
3152         u32 page_size;
3153         u8 port_num;
3154         u64 *mtts_3;
3155         u64 *mtts_2;
3156         u64 *mtts;
3157         u8 *dmac;
3158         u8 *smac;
3159         int port;
3160
3161         /* Look up the MTT entries of the QP buffer */
3162         mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
3163                                    hr_qp->mtt.first_seg, &dma_handle);
3164         if (!mtts) {
3165                 dev_err(dev, "failed to find mtt for qp buf\n");
3166                 return -EINVAL;
3167         }
3168
3169         /* Look up the IRRL's MTT entries */
3170         mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
3171                                      hr_qp->qpn, &dma_handle_2);
3172         if (!mtts_2) {
3173                 dev_err(dev, "failed to find irrl_table entry for qp\n");
3174                 return -EINVAL;
3175         }
3176
3177         /* Look up the TRRL's MTT entries */
3178         mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
3179                                      hr_qp->qpn, &dma_handle_3);
3180         if (!mtts_3) {
3181                 dev_err(dev, "failed to find trrl_table entry for qp\n");
3182                 return -EINVAL;
3183         }
3184
3185         if (attr_mask & IB_QP_ALT_PATH) {
3186                 dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
3187                 return -EINVAL;
3188         }
3189
3190         dmac = (u8 *)attr->ah_attr.roce.dmac;
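             /*
              * The WQE/SGE base address is 8-byte aligned, so it is stored
              * shifted right by 3; the bits above 32 land in byte_12 below.
              */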
3191         context->wqe_sge_ba = (u32)(dma_handle >> 3);
3192         qpc_mask->wqe_sge_ba = 0;
3193
3194         /*
3195          * In the v2 engine, software passes a context and a context mask
3196          * to hardware when modifying a QP. For every field software wants
3197          * to modify, all bits of that field in the context mask must be
3198          * cleared to 0; the mask bits of untouched fields stay at 0x1.
3199          */
3200         roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3201                        V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
3202         roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3203                        V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
3204
3205         roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3206                        V2_QPC_BYTE_12_SQ_HOP_NUM_S,
3207                        hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
3208                        0 : hr_dev->caps.mtt_hop_num);
3209         roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3210                        V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
3211
3212         roce_set_field(context->byte_20_smac_sgid_idx,
3213                        V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3214                        V2_QPC_BYTE_20_SGE_HOP_NUM_S,
3215                        ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3216                        hr_dev->caps.mtt_hop_num : 0);
3217         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3218                        V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3219                        V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
3220
3221         roce_set_field(context->byte_20_smac_sgid_idx,
3222                        V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3223                        V2_QPC_BYTE_20_RQ_HOP_NUM_S,
3224                        hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
3225                        0 : hr_dev->caps.mtt_hop_num);
3226         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3227                        V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3228                        V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
3229
3230         roce_set_field(context->byte_16_buf_ba_pg_sz,
3231                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3232                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
3233                        hr_dev->caps.mtt_ba_pg_sz + PG_SHIFT_OFFSET);
3234         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3235                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3236                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
3237
3238         roce_set_field(context->byte_16_buf_ba_pg_sz,
3239                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3240                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
3241                        hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
3242         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3243                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3244                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
3245
3246         roce_set_field(context->byte_80_rnr_rx_cqn,
3247                        V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3248                        V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
3249         roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
3250                        V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3251                        V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
3252
3253         page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3254         context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
3255                                     >> PAGE_ADDR_SHIFT);
3256         qpc_mask->rq_cur_blk_addr = 0;
3257
3258         roce_set_field(context->byte_92_srq_info,
3259                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3260                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
3261                        mtts[hr_qp->rq.offset / page_size]
3262                        >> (32 + PAGE_ADDR_SHIFT));
3263         roce_set_field(qpc_mask->byte_92_srq_info,
3264                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3265                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
3266
3267         context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
3268                                     >> PAGE_ADDR_SHIFT);
3269         qpc_mask->rq_nxt_blk_addr = 0;
3270
3271         roce_set_field(context->byte_104_rq_sge,
3272                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3273                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
3274                        mtts[hr_qp->rq.offset / page_size + 1]
3275                        >> (32 + PAGE_ADDR_SHIFT));
3276         roce_set_field(qpc_mask->byte_104_rq_sge,
3277                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3278                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
3279
3280         roce_set_field(context->byte_108_rx_reqepsn,
3281                        V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3282                        V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
3283         roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3284                        V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3285                        V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
3286
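             /*
              * The TRRL base address is 16-byte aligned (hence >> 4) and is
              * spread across byte_132, trrl_ba and byte_140; the IRRL base
              * below is 64-byte aligned (hence >> 6).
              */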
3287         roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3288                        V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
3289         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3290                        V2_QPC_BYTE_132_TRRL_BA_S, 0);
3291         context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
3292         qpc_mask->trrl_ba = 0;
3293         roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3294                        V2_QPC_BYTE_140_TRRL_BA_S,
3295                        (u32)(dma_handle_3 >> (32 + 16 + 4)));
3296         roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3297                        V2_QPC_BYTE_140_TRRL_BA_S, 0);
3298
3299         context->irrl_ba = (u32)(dma_handle_2 >> 6);
3300         qpc_mask->irrl_ba = 0;
3301         roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3302                        V2_QPC_BYTE_208_IRRL_BA_S,
3303                        dma_handle_2 >> (32 + 6));
3304         roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3305                        V2_QPC_BYTE_208_IRRL_BA_S, 0);
3306
3307         roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
3308         roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
3309
3310         roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3311                      hr_qp->sq_signal_bits);
3312         roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3313                      0);
3314
3315         port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
3316
3317         smac = (u8 *)hr_dev->dev_addr[port];
3318         /* When the DMAC equals the SMAC, or loop_idc is 1, traffic loops back */
3319         if (ether_addr_equal_unaligned(dmac, smac) ||
3320             hr_dev->loop_idc == 0x1) {
3321                 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
3322                 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
3323         }
3324
3325         if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
3326              attr->max_dest_rd_atomic) {
3327                 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
3328                                V2_QPC_BYTE_140_RR_MAX_S,
3329                                fls(attr->max_dest_rd_atomic - 1));
3330                 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
3331                                V2_QPC_BYTE_140_RR_MAX_S, 0);
3332         }
3333
3334         if (attr_mask & IB_QP_DEST_QPN) {
3335                 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3336                                V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
3337                 roce_set_field(qpc_mask->byte_56_dqpn_err,
3338                                V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3339         }
3340
3341         /* Configure GID index */
3342         port_num = rdma_ah_get_port_num(&attr->ah_attr);
3343         roce_set_field(context->byte_20_smac_sgid_idx,
3344                        V2_QPC_BYTE_20_SGID_IDX_M,
3345                        V2_QPC_BYTE_20_SGID_IDX_S,
3346                        hns_get_gid_index(hr_dev, port_num - 1,
3347                                          grh->sgid_index));
3348         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3349                        V2_QPC_BYTE_20_SGID_IDX_M,
3350                        V2_QPC_BYTE_20_SGID_IDX_S, 0);
3351         memcpy(&(context->dmac), dmac, 4);
3352         roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3353                        V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
3354         qpc_mask->dmac = 0;
3355         roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3356                        V2_QPC_BYTE_52_DMAC_S, 0);
3357
3358         roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3359                        V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
3360         roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3361                        V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
3362
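             /*
              * UD and GSI QPs always run with a 4K MTU; connected QPs take
              * the MTU from the path attribute when one is supplied.
              */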
3363         if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
3364                 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3365                                V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
3366         else if (attr_mask & IB_QP_PATH_MTU)
3367                 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3368                                V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
3369
3370         roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3371                        V2_QPC_BYTE_24_MTU_S, 0);
3372
3373         roce_set_field(context->byte_84_rq_ci_pi,
3374                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3375                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
3376         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3377                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3378                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3379
3380         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3381                        V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3382                        V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3383         roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3384                      V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3385         roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3386                        V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3387         roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3388                        V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3389                        V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3390
3391         context->rq_rnr_timer = 0;
3392         qpc_mask->rq_rnr_timer = 0;
3393
3394         roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3395                        V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
3396         roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3397                        V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3398
3399         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3400                        V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3401         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3402                        V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3403
3404         roce_set_field(context->byte_168_irrl_idx,
3405                        V2_QPC_BYTE_168_LP_SGEN_INI_M,
3406                        V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
3407         roce_set_field(qpc_mask->byte_168_irrl_idx,
3408                        V2_QPC_BYTE_168_LP_SGEN_INI_M,
3409                        V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
3410
3411         return 0;
3412 }
3413
3414 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
3415                                 const struct ib_qp_attr *attr, int attr_mask,
3416                                 struct hns_roce_v2_qp_context *context,
3417                                 struct hns_roce_v2_qp_context *qpc_mask)
3418 {
3419         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3420         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3421         struct device *dev = hr_dev->dev;
3422         dma_addr_t dma_handle;
3423         u32 page_size;
3424         u64 *mtts;
3425
3426         /* Look up the MTT entries of the QP buffer */
3427         mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
3428                                    hr_qp->mtt.first_seg, &dma_handle);
3429         if (!mtts) {
3430                 dev_err(dev, "failed to find mtt for qp buf\n");
3431                 return -EINVAL;
3432         }
3433
3434         /* Alternate path and path migration are not supported */
3435         if ((attr_mask & IB_QP_ALT_PATH) ||
3436             (attr_mask & IB_QP_PATH_MIG_STATE)) {
3437                 dev_err(dev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
3438                 return -EINVAL;
3439         }
3440
3441         /*
3442          * In the v2 engine, software passes a context and a context mask
3443          * to hardware when modifying a QP. For every field software wants
3444          * to modify, all bits of that field in the context mask must be
3445          * cleared to 0; the mask bits of untouched fields stay at 0x1.
3446          */
3447         context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
3448         roce_set_field(context->byte_168_irrl_idx,
3449                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3450                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
3451                        mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3452         qpc_mask->sq_cur_blk_addr = 0;
3453         roce_set_field(qpc_mask->byte_168_irrl_idx,
3454                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3455                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
3456
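             /*
              * The current SGE block address is only meaningful when the
              * extended SGE area is in use (GSI QPs, or more than two SGEs
              * per WQE); otherwise it is left at zero.
              */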
3457         page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3458         context->sq_cur_sge_blk_addr =
3459                        ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3460                                       ((u32)(mtts[hr_qp->sge.offset / page_size]
3461                                       >> PAGE_ADDR_SHIFT)) : 0;
3462         roce_set_field(context->byte_184_irrl_idx,
3463                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3464                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
3465                        ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3466                        (mtts[hr_qp->sge.offset / page_size] >>
3467                        (32 + PAGE_ADDR_SHIFT)) : 0);
3468         qpc_mask->sq_cur_sge_blk_addr = 0;
3469         roce_set_field(qpc_mask->byte_184_irrl_idx,
3470                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3471                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
3472
3473         context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
3474         roce_set_field(context->byte_232_irrl_sge,
3475                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3476                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
3477                        mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3478         qpc_mask->rx_sq_cur_blk_addr = 0;
3479         roce_set_field(qpc_mask->byte_232_irrl_sge,
3480                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3481                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
3482
3483         /*
3484          * Clear some fields in the context. Because every context field
3485          * defaults to zero, the fields need not be written again; only
3486          * the corresponding context-mask bits must be cleared to 0.
3487          */
3488         roce_set_field(qpc_mask->byte_232_irrl_sge,
3489                        V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3490                        V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3491
3492         roce_set_field(qpc_mask->byte_240_irrl_tail,
3493                        V2_QPC_BYTE_240_RX_ACK_MSN_M,
3494                        V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3495
3496         roce_set_field(context->byte_244_rnr_rxack,
3497                        V2_QPC_BYTE_244_RX_ACK_EPSN_M,
3498                        V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
3499         roce_set_field(qpc_mask->byte_244_rnr_rxack,
3500                        V2_QPC_BYTE_244_RX_ACK_EPSN_M,
3501                        V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
3502
3503         roce_set_field(qpc_mask->byte_248_ack_psn,
3504                        V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3505                        V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3506         roce_set_bit(qpc_mask->byte_248_ack_psn,
3507                      V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
3508         roce_set_field(qpc_mask->byte_248_ack_psn,
3509                        V2_QPC_BYTE_248_IRRL_PSN_M,
3510                        V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3511
3512         roce_set_field(qpc_mask->byte_240_irrl_tail,
3513                        V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3514                        V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3515
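             /*
              * The retry PSN is wider than one field: the low bits go into
              * byte_220 and the bits above bit 16 into byte_224.
              */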
3516         roce_set_field(context->byte_220_retry_psn_msn,
3517                        V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
3518                        V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
3519         roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3520                        V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
3521                        V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
3522
3523         roce_set_field(context->byte_224_retry_msg,
3524                        V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
3525                        V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
3526         roce_set_field(qpc_mask->byte_224_retry_msg,
3527                        V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
3528                        V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
3529
3530         roce_set_field(context->byte_224_retry_msg,
3531                        V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
3532                        V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
3533         roce_set_field(qpc_mask->byte_224_retry_msg,
3534                        V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
3535                        V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
3536
3537         roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3538                        V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3539                        V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3540
3541         roce_set_bit(qpc_mask->byte_248_ack_psn,
3542                      V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3543
3544         roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3545                        V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3546
3547         roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
3548                        V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
3549         roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
3550                        V2_QPC_BYTE_212_RETRY_CNT_S, 0);
3551
3552         roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
3553                        V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
3554         roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
3555                        V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
3556
3557         roce_set_field(context->byte_244_rnr_rxack,
3558                        V2_QPC_BYTE_244_RNR_NUM_INIT_M,
3559                        V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
3560         roce_set_field(qpc_mask->byte_244_rnr_rxack,
3561                        V2_QPC_BYTE_244_RNR_NUM_INIT_M,
3562                        V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
3563
3564         roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
3565                        V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
3566         roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
3567                        V2_QPC_BYTE_244_RNR_CNT_S, 0);
3568
3569         roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3570                        V2_QPC_BYTE_212_LSN_S, 0x100);
3571         roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3572                        V2_QPC_BYTE_212_LSN_S, 0);
3573
3574         if (attr_mask & IB_QP_TIMEOUT) {
3575                 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
3576                                V2_QPC_BYTE_28_AT_S, attr->timeout);
3577                 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
3578                                V2_QPC_BYTE_28_AT_S, 0);
3579         }
3580
3581         roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3582                        V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
3583         roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3584                        V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
3585
3586         roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3587                        V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3588         roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
3589                        V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
3590         roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
3591                        V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
3592
3593         if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
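                /*
                 * SR_MAX holds log2 of the maximum outstanding RDMA
                 * read/atomic operations; fls(n - 1) yields ceil(log2(n))
                 * for n >= 1.
                 */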
3594                 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
3595                                V2_QPC_BYTE_208_SR_MAX_S,
3596                                fls(attr->max_rd_atomic - 1));
3597                 roce_set_field(qpc_mask->byte_208_irrl,
3598                                V2_QPC_BYTE_208_SR_MAX_M,
3599                                V2_QPC_BYTE_208_SR_MAX_S, 0);
3600         }
3601         return 0;
3602 }
3603
3604 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
3605                                  const struct ib_qp_attr *attr,
3606                                  int attr_mask, enum ib_qp_state cur_state,
3607                                  enum ib_qp_state new_state)
3608 {
3609         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3610         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3611         struct hns_roce_v2_qp_context *context;
3612         struct hns_roce_v2_qp_context *qpc_mask;
3613         struct device *dev = hr_dev->dev;
3614         int ret = -EINVAL;
3615
3616         context = kcalloc(2, sizeof(*context), GFP_KERNEL);
3617         if (!context)
3618                 return -ENOMEM;
3619
3620         qpc_mask = context + 1;
3621         /*
3622          * In the v2 engine, software passes both a context and a context
3623          * mask to hardware when modifying a qp. For each field software
3624          * needs to modify, all bits of that field in the context mask must
3625          * be cleared to 0; fields whose mask bits remain 0x1 are unchanged.
3626          */
3627         memset(qpc_mask, 0xff, sizeof(*qpc_mask));
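        /*
         * Illustration of the resulting convention (the calls below are the
         * real ones used later in this function): every modified field is
         * written twice, the value into @context and zero into the same bits
         * of @qpc_mask, e.g. for the QP state:
         *
         *      roce_set_field(context->byte_60_qpst_tempid,
         *                     V2_QPC_BYTE_60_QP_ST_M,
         *                     V2_QPC_BYTE_60_QP_ST_S, new_state);
         *      roce_set_field(qpc_mask->byte_60_qpst_tempid,
         *                     V2_QPC_BYTE_60_QP_ST_M,
         *                     V2_QPC_BYTE_60_QP_ST_S, 0);
         */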
3628         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3629                 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
3630                                         qpc_mask);
3631         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3632                 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
3633                                        qpc_mask);
3634         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
3635                 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
3636                                             qpc_mask);
3637                 if (ret)
3638                         goto out;
3639         } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
3640                 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
3641                                            qpc_mask);
3642                 if (ret)
3643                         goto out;
3644         } else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) ||
3645                    (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) ||
3646                    (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) ||
3647                    (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) ||
3648                    (cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) ||
3649                    (cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
3650                    (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3651                    (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3652                    (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3653                    (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3654                    (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3655                    (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3656                    (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
3657                    (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) ||
3658                    (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
3659                 /* Nothing */
3660                 ;
3661         } else {
3662                 dev_err(dev, "Illegal state for QP!\n");
3663                 ret = -EINVAL;
3664                 goto out;
3665         }
3666
3667         /* When QP state is err, SQ and RQ WQE should be flushed */
3668         if (new_state == IB_QPS_ERR) {
3669                 roce_set_field(context->byte_160_sq_ci_pi,
3670                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3671                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
3672                                hr_qp->sq.head);
3673                 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3674                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3675                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
3676                 roce_set_field(context->byte_84_rq_ci_pi,
3677                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3678                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
3679                                hr_qp->rq.head);
3680                 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3681                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3682                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3683         }
3684
3685         if (attr_mask & IB_QP_AV) {
3686                 const struct ib_global_route *grh =
3687                                             rdma_ah_read_grh(&attr->ah_attr);
3688                 const struct ib_gid_attr *gid_attr = NULL;
3689                 u8 src_mac[ETH_ALEN];
3690                 int is_roce_protocol;
3691                 u16 vlan = 0xffff;
3692                 u8 ib_port;
3693                 u8 hr_port;
3694
3695                 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
3696                            hr_qp->port + 1;
3697                 hr_port = ib_port - 1;
3698                 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
3699                                rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
3700
3701                 if (is_roce_protocol) {
3702                         gid_attr = attr->ah_attr.grh.sgid_attr;
3703                         vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
3704                         memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
3705                 }
3706
3707                 if (is_roce_protocol && is_vlan_dev(gid_attr->ndev)) {
3708                         roce_set_bit(context->byte_76_srqn_op_en,
3709                                      V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
3710                         roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3711                                      V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
3712                         roce_set_bit(context->byte_168_irrl_idx,
3713                                      V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
3714                         roce_set_bit(qpc_mask->byte_168_irrl_idx,
3715                                      V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
3716                 }
3717
3718                 roce_set_field(context->byte_24_mtu_tc,
3719                                V2_QPC_BYTE_24_VLAN_ID_M,
3720                                V2_QPC_BYTE_24_VLAN_ID_S, vlan);
3721                 roce_set_field(qpc_mask->byte_24_mtu_tc,
3722                                V2_QPC_BYTE_24_VLAN_ID_M,
3723                                V2_QPC_BYTE_24_VLAN_ID_S, 0);
3724
3725                 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
3726                         dev_err(hr_dev->dev,
3727                                 "sgid_index(%u) too large. max is %d\n",
3728                                 grh->sgid_index,
3729                                 hr_dev->caps.gid_table_len[hr_port]);
3730                         ret = -EINVAL;
3731                         goto out;
3732                 }
3733
3734                 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
3735                         dev_err(hr_dev->dev, "ah attr is not RDMA RoCE type\n");
3736                         ret = -EINVAL;
3737                         goto out;
3738                 }
3739
3740                 roce_set_field(context->byte_52_udpspn_dmac,
3741                            V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
3742                            (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
3743                            0 : 0x12b7);
3744
3745                 roce_set_field(qpc_mask->byte_52_udpspn_dmac,
3746                                V2_QPC_BYTE_52_UDPSPN_M,
3747                                V2_QPC_BYTE_52_UDPSPN_S, 0);
3748
3749                 roce_set_field(context->byte_20_smac_sgid_idx,
3750                                V2_QPC_BYTE_20_SGID_IDX_M,
3751                                V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);
3752
3753                 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3754                                V2_QPC_BYTE_20_SGID_IDX_M,
3755                                V2_QPC_BYTE_20_SGID_IDX_S, 0);
3756
3757                 roce_set_field(context->byte_24_mtu_tc,
3758                                V2_QPC_BYTE_24_HOP_LIMIT_M,
3759                                V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
3760                 roce_set_field(qpc_mask->byte_24_mtu_tc,
3761                                V2_QPC_BYTE_24_HOP_LIMIT_M,
3762                                V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
3763
3764                 if (hr_dev->pci_dev->revision == 0x21 &&
3765                     gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
3766                         roce_set_field(context->byte_24_mtu_tc,
3767                                        V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
3768                                        grh->traffic_class >> 2);
3769                 else
3770                         roce_set_field(context->byte_24_mtu_tc,
3771                                        V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
3772                                        grh->traffic_class);
3773                 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
3774                                V2_QPC_BYTE_24_TC_S, 0);
3775                 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
3776                                V2_QPC_BYTE_28_FL_S, grh->flow_label);
3777                 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
3778                                V2_QPC_BYTE_28_FL_S, 0);
3779                 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
3780                 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
3781                 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
3782                                V2_QPC_BYTE_28_SL_S,
3783                                rdma_ah_get_sl(&attr->ah_attr));
3784                 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
3785                                V2_QPC_BYTE_28_SL_S, 0);
3786                 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3787         }
3788
3789         if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
3790                 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
3791
3792         /* Every state transition must update the QP state field */
3793         roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
3794                        V2_QPC_BYTE_60_QP_ST_S, new_state);
3795         roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
3796                        V2_QPC_BYTE_60_QP_ST_S, 0);
3797
3798         /* SW pass context to HW */
3799         ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
3800                                     context, hr_qp);
3801         if (ret) {
3802                 dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
3803                 goto out;
3804         }
3805
3806         hr_qp->state = new_state;
3807
3808         if (attr_mask & IB_QP_ACCESS_FLAGS)
3809                 hr_qp->atomic_rd_en = attr->qp_access_flags;
3810
3811         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3812                 hr_qp->resp_depth = attr->max_dest_rd_atomic;
3813         if (attr_mask & IB_QP_PORT) {
3814                 hr_qp->port = attr->port_num - 1;
3815                 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3816         }
3817
3818         if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3819                 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3820                                      ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3821                 if (ibqp->send_cq != ibqp->recv_cq)
3822                         hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
3823                                              hr_qp->qpn, NULL);
3824
3825                 hr_qp->rq.head = 0;
3826                 hr_qp->rq.tail = 0;
3827                 hr_qp->sq.head = 0;
3828                 hr_qp->sq.tail = 0;
3829                 hr_qp->sq_next_wqe = 0;
3830                 hr_qp->next_sge = 0;
3831                 if (hr_qp->rq.wqe_cnt)
3832                         *hr_qp->rdb.db_record = 0;
3833         }
3834
3835 out:
3836         kfree(context);
3837         return ret;
3838 }
3839
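/*
 * Consumer-side sketch (an illustrative assumption, not part of this
 * driver): the transitions accepted above are normally driven one step at
 * a time through the core verbs layer, e.g.:
 *
 *      struct ib_qp_attr qp_attr = { .qp_state = IB_QPS_INIT };
 *
 *      ret = ib_modify_qp(ibqp, &qp_attr,
 *                         IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *                         IB_QP_ACCESS_FLAGS);
 *
 * followed by further ib_modify_qp() calls for INIT -> RTR (with path and
 * remote QP attributes) and RTR -> RTS (with PSN/retry attributes).
 */
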
3840 static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
3841 {
3842         switch (state) {
3843         case HNS_ROCE_QP_ST_RST:        return IB_QPS_RESET;
3844         case HNS_ROCE_QP_ST_INIT:       return IB_QPS_INIT;
3845         case HNS_ROCE_QP_ST_RTR:        return IB_QPS_RTR;
3846         case HNS_ROCE_QP_ST_RTS:        return IB_QPS_RTS;
3847         case HNS_ROCE_QP_ST_SQ_DRAINING:
3848         case HNS_ROCE_QP_ST_SQD:        return IB_QPS_SQD;
3849         case HNS_ROCE_QP_ST_SQER:       return IB_QPS_SQE;
3850         case HNS_ROCE_QP_ST_ERR:        return IB_QPS_ERR;
3851         default:                        return -1;
3852         }
3853 }
3854
3855 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
3856                                  struct hns_roce_qp *hr_qp,
3857                                  struct hns_roce_v2_qp_context *hr_context)
3858 {
3859         struct hns_roce_cmd_mailbox *mailbox;
3860         int ret;
3861
3862         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3863         if (IS_ERR(mailbox))
3864                 return PTR_ERR(mailbox);
3865
3866         ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3867                                 HNS_ROCE_CMD_QUERY_QPC,
3868                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
3869         if (ret) {
3870                 dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
3871                 goto out;
3872         }
3873
3874         memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3875
3876 out:
3877         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3878         return ret;
3879 }
3880
3881 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3882                                 int qp_attr_mask,
3883                                 struct ib_qp_init_attr *qp_init_attr)
3884 {
3885         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3886         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3887         struct hns_roce_v2_qp_context *context;
3888         struct device *dev = hr_dev->dev;
3889         int tmp_qp_state;
3890         int state;
3891         int ret;
3892
3893         context = kzalloc(sizeof(*context), GFP_KERNEL);
3894         if (!context)
3895                 return -ENOMEM;
3896
3897         memset(qp_attr, 0, sizeof(*qp_attr));
3898         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3899
3900         mutex_lock(&hr_qp->mutex);
3901
3902         if (hr_qp->state == IB_QPS_RESET) {
3903                 qp_attr->qp_state = IB_QPS_RESET;
3904                 ret = 0;
3905                 goto done;
3906         }
3907
3908         ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
3909         if (ret) {
3910                 dev_err(dev, "query qpc error\n");
3911                 ret = -EINVAL;
3912                 goto out;
3913         }
3914
3915         state = roce_get_field(context->byte_60_qpst_tempid,
3916                                V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
3917         tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
3918         if (tmp_qp_state == -1) {
3919                 dev_err(dev, "Illegal ib_qp_state\n");
3920                 ret = -EINVAL;
3921                 goto out;
3922         }
3923         hr_qp->state = (u8)tmp_qp_state;
3924         qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3925         qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
3926                                                         V2_QPC_BYTE_24_MTU_M,
3927                                                         V2_QPC_BYTE_24_MTU_S);
3928         qp_attr->path_mig_state = IB_MIG_ARMED;
3929         qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
3930         if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3931                 qp_attr->qkey = V2_QKEY_VAL;
3932
3933         qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
3934                                          V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3935                                          V2_QPC_BYTE_108_RX_REQ_EPSN_S);
3936         qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
3937                                               V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3938                                               V2_QPC_BYTE_172_SQ_CUR_PSN_S);
3939         qp_attr->dest_qp_num = roce_get_field(context->byte_56_dqpn_err,
3940                                               V2_QPC_BYTE_56_DQPN_M,
3941                                               V2_QPC_BYTE_56_DQPN_S);
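        /* Rebuild the IB_ACCESS_* bits from the QPC enable bits:
         * REMOTE_READ is BIT(2), REMOTE_WRITE is BIT(1) and
         * REMOTE_ATOMIC is BIT(3).
         */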
3942         qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
3943                                                   V2_QPC_BYTE_76_RRE_S)) << 2) |
3944                                    ((roce_get_bit(context->byte_76_srqn_op_en,
3945                                                   V2_QPC_BYTE_76_RWE_S)) << 1) |
3946                                    ((roce_get_bit(context->byte_76_srqn_op_en,
3947                                                   V2_QPC_BYTE_76_ATE_S)) << 3);
3948         if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3949             hr_qp->ibqp.qp_type == IB_QPT_UC) {
3950                 struct ib_global_route *grh =
3951                                 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3952
3953                 rdma_ah_set_sl(&qp_attr->ah_attr,
3954                                roce_get_field(context->byte_28_at_fl,
3955                                               V2_QPC_BYTE_28_SL_M,
3956                                               V2_QPC_BYTE_28_SL_S));
3957                 grh->flow_label = roce_get_field(context->byte_28_at_fl,
3958                                                  V2_QPC_BYTE_28_FL_M,
3959                                                  V2_QPC_BYTE_28_FL_S);
3960                 grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
3961                                                  V2_QPC_BYTE_20_SGID_IDX_M,
3962                                                  V2_QPC_BYTE_20_SGID_IDX_S);
3963                 grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
3964                                                 V2_QPC_BYTE_24_HOP_LIMIT_M,
3965                                                 V2_QPC_BYTE_24_HOP_LIMIT_S);
3966                 grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
3967                                                     V2_QPC_BYTE_24_TC_M,
3968                                                     V2_QPC_BYTE_24_TC_S);
3969
3970                 memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
3971         }
3972
3973         qp_attr->port_num = hr_qp->port + 1;
3974         qp_attr->sq_draining = 0;
3975         qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
3976                                                      V2_QPC_BYTE_208_SR_MAX_M,
3977                                                      V2_QPC_BYTE_208_SR_MAX_S);
3978         qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
3979                                                      V2_QPC_BYTE_140_RR_MAX_M,
3980                                                      V2_QPC_BYTE_140_RR_MAX_S);
3981         qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
3982                                                  V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3983                                                  V2_QPC_BYTE_80_MIN_RNR_TIME_S);
3984         qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
3985                                               V2_QPC_BYTE_28_AT_M,
3986                                               V2_QPC_BYTE_28_AT_S);
3987         qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
3988                                             V2_QPC_BYTE_212_RETRY_CNT_M,
3989                                             V2_QPC_BYTE_212_RETRY_CNT_S);
3990         qp_attr->rnr_retry = context->rq_rnr_timer;
3991
3992 done:
3993         qp_attr->cur_qp_state = qp_attr->qp_state;
3994         qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3995         qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3996
3997         if (!ibqp->uobject) {
3998                 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3999                 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
4000         } else {
4001                 qp_attr->cap.max_send_wr = 0;
4002                 qp_attr->cap.max_send_sge = 0;
4003         }
4004
4005         qp_init_attr->cap = qp_attr->cap;
4006
4007 out:
4008         mutex_unlock(&hr_qp->mutex);
4009         kfree(context);
4010         return ret;
4011 }
4012
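/*
 * Illustrative consumer call (an assumption, not driver code): the hook
 * above backs ib_query_qp(), e.g.
 *
 *      struct ib_qp_attr attr;
 *      struct ib_qp_init_attr init_attr;
 *
 *      ret = ib_query_qp(ibqp, &attr, IB_QP_STATE, &init_attr);
 *
 * after which attr.qp_state reflects the QPC state read from hardware.
 */
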
4013 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
4014                                          struct hns_roce_qp *hr_qp,
4015                                          int is_user)
4016 {
4017         struct hns_roce_cq *send_cq, *recv_cq;
4018         struct device *dev = hr_dev->dev;
4019         int ret;
4020
4021         if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
4022                 /* Modify qp to reset before destroying qp */
4023                 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
4024                                             hr_qp->state, IB_QPS_RESET);
4025                 if (ret) {
4026                         dev_err(dev, "modify QP %06lx to Reset failed.\n",
4027                                 hr_qp->qpn);
4028                         return ret;
4029                 }
4030         }
4031
4032         send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
4033         recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
4034
4035         hns_roce_lock_cqs(send_cq, recv_cq);
4036
4037         if (!is_user) {
4038                 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
4039                                        to_hr_srq(hr_qp->ibqp.srq) : NULL);
4040                 if (send_cq != recv_cq)
4041                         __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
4042         }
4043
4044         hns_roce_qp_remove(hr_dev, hr_qp);
4045
4046         hns_roce_unlock_cqs(send_cq, recv_cq);
4047
4048         hns_roce_qp_free(hr_dev, hr_qp);
4049
4050         /* Not a special QP (GSI), so free its QPN */
4051         if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
4052             (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
4053             (hr_qp->ibqp.qp_type == IB_QPT_UD))
4054                 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
4055
4056         hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
4057
4058         if (is_user) {
4059                 if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
4060                         hns_roce_db_unmap_user(
4061                                 to_hr_ucontext(hr_qp->ibqp.uobject->context),
4062                                 &hr_qp->sdb);
4063
4064                 if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
4065                         hns_roce_db_unmap_user(
4066                                 to_hr_ucontext(hr_qp->ibqp.uobject->context),
4067                                 &hr_qp->rdb);
4068                 ib_umem_release(hr_qp->umem);
4069         } else {
4070                 kfree(hr_qp->sq.wrid);
4071                 kfree(hr_qp->rq.wrid);
4072                 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
4073                 if (hr_qp->rq.wqe_cnt)
4074                         hns_roce_free_db(hr_dev, &hr_qp->rdb);
4075         }
4076
4077         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
4078                 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
4079                 kfree(hr_qp->rq_inl_buf.wqe_list);
4080         }
4081
4082         return 0;
4083 }
4084
4085 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
4086 {
4087         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4088         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4089         int ret;
4090
4091         ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
4092         if (ret) {
4093                 dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
4094                 return ret;
4095         }
4096
4097         if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
4098                 kfree(hr_to_hr_sqp(hr_qp));
4099         else
4100                 kfree(hr_qp);
4101
4102         return 0;
4103 }
4104
4105 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
4106 {
4107         struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
4108         struct hns_roce_v2_cq_context *cq_context;
4109         struct hns_roce_cq *hr_cq = to_hr_cq(cq);
4110         struct hns_roce_v2_cq_context *cqc_mask;
4111         struct hns_roce_cmd_mailbox *mailbox;
4112         int ret;
4113
4114         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4115         if (IS_ERR(mailbox))
4116                 return PTR_ERR(mailbox);
4117
4118         cq_context = mailbox->buf;
4119         cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
4120
4121         memset(cqc_mask, 0xff, sizeof(*cqc_mask));
4122
4123         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4124                        V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4125                        cq_count);
4126         roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4127                        V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4128                        0);
4129         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4130                        V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4131                        cq_period);
4132         roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4133                        V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4134                        0);
4135
4136         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
4137                                 HNS_ROCE_CMD_MODIFY_CQC,
4138                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
4139         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4140         if (ret)
4141                 dev_err(hr_dev->dev, "MODIFY CQ mailbox cmd failed(%d).\n", ret);
4142
4143         return ret;
4144 }
4145
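/*
 * Sketch of how this hook is reached (an assumption for illustration):
 * CQ moderation arrives through the core verbs API, e.g.
 *
 *      ret = ib_modify_cq(ibcq, 16, 64);
 *
 * which lands here with cq_count = 16 and cq_period = 64 (period units
 * are device-defined).
 */
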
4146 static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
4147 {
4148         struct hns_roce_qp *hr_qp;
4149         struct ib_qp_attr attr;
4150         int attr_mask;
4151         int ret;
4152
4153         hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
4154         if (!hr_qp) {
4155                 dev_warn(hr_dev->dev, "no hr_qp found for qpn 0x%x!\n", qpn);
4156                 return;
4157         }
4158
4159         if (hr_qp->ibqp.uobject) {
4160                 if (hr_qp->sdb_en == 1) {
4161                         hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
4162                         hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
4163                 } else {
4164                         dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
4165                         return;
4166                 }
4167         }
4168
4169         attr_mask = IB_QP_STATE;
4170         attr.qp_state = IB_QPS_ERR;
4171         ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
4172                                     hr_qp->state, IB_QPS_ERR);
4173         if (ret)
4174                 dev_err(hr_dev->dev, "failed to modify QP %u to err state.\n",
4175                         qpn);
4176 }
4177
4178 static void hns_roce_irq_work_handle(struct work_struct *work)
4179 {
4180         struct hns_roce_work *irq_work =
4181                                 container_of(work, struct hns_roce_work, work);
4182         struct device *dev = irq_work->hr_dev->dev;
4183         u32 qpn = irq_work->qpn;
4184         u32 cqn = irq_work->cqn;
4185
4186         switch (irq_work->event_type) {
4187         case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4188                 dev_info(dev, "Path migration succeeded.\n");
4189                 break;
4190         case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4191                 dev_warn(dev, "Path migration failed.\n");
4192                 break;
4193         case HNS_ROCE_EVENT_TYPE_COMM_EST:
4194                 dev_info(dev, "Communication established.\n");
4195                 break;
4196         case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4197                 dev_warn(dev, "Send queue drained.\n");
4198                 break;
4199         case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4200                 dev_err(dev, "Local work queue catastrophic error.\n");
4201                 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4202                 switch (irq_work->sub_type) {
4203                 case HNS_ROCE_LWQCE_QPC_ERROR:
4204                         dev_err(dev, "QP %d, QPC error.\n", qpn);
4205                         break;
4206                 case HNS_ROCE_LWQCE_MTU_ERROR:
4207                         dev_err(dev, "QP %d, MTU error.\n", qpn);
4208                         break;
4209                 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
4210                         dev_err(dev, "QP %d, WQE BA addr error.\n", qpn);
4211                         break;
4212                 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
4213                         dev_err(dev, "QP %d, WQE addr error.\n", qpn);
4214                         break;
4215                 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
4216                         dev_err(dev, "QP %d, WQE shift error.\n", qpn);
4217                         break;
4218                 default:
4219                         dev_err(dev, "Unhandled sub_event type %d.\n",
4220                                 irq_work->sub_type);
4221                         break;
4222                 }
4223                 break;
4224         case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4225                 dev_err(dev, "Invalid request local work queue error.\n");
4226                 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4227                 break;
4228         case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4229                 dev_err(dev, "Local access violation work queue error.\n");
4230                 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4231                 switch (irq_work->sub_type) {
4232                 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
4233                         dev_err(dev, "QP %d, R_key violation.\n", qpn);
4234                         break;
4235                 case HNS_ROCE_LAVWQE_LENGTH_ERROR:
4236                         dev_err(dev, "QP %d, length error.\n", qpn);
4237                         break;
4238                 case HNS_ROCE_LAVWQE_VA_ERROR:
4239                         dev_err(dev, "QP %d, VA error.\n", qpn);
4240                         break;
4241                 case HNS_ROCE_LAVWQE_PD_ERROR:
4242                         dev_err(dev, "QP %d, PD error.\n", qpn);
4243                         break;
4244                 case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
4245                         dev_err(dev, "QP %d, rw acc error.\n", qpn);
4246                         break;
4247                 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
4248                         dev_err(dev, "QP %d, key state error.\n", qpn);
4249                         break;
4250                 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
4251                         dev_err(dev, "QP %d, MR operation error.\n", qpn);
4252                         break;
4253                 default:
4254                         dev_err(dev, "Unhandled sub_event type %d.\n",
4255                                 irq_work->sub_type);
4256                         break;
4257                 }
4258                 break;
4259         case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4260                 dev_warn(dev, "SRQ limit reached.\n");
4261                 break;
4262         case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4263                 dev_warn(dev, "SRQ last wqe reached.\n");
4264                 break;
4265         case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4266                 dev_err(dev, "SRQ catas error.\n");
4267                 break;
4268         case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4269                 dev_err(dev, "CQ 0x%x access error.\n", cqn);
4270                 break;
4271         case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4272                 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
4273                 break;
4274         case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4275                 dev_warn(dev, "DB overflow.\n");
4276                 break;
4277         case HNS_ROCE_EVENT_TYPE_FLR:
4278                 dev_warn(dev, "Function level reset.\n");
4279                 break;
4280         default:
4281                 break;
4282         }
4283
4284         kfree(irq_work);
4285 }
4286
4287 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
4288                                       struct hns_roce_eq *eq,
4289                                       u32 qpn, u32 cqn)
4290 {
4291         struct hns_roce_work *irq_work;
4292
4293         irq_work = kzalloc(sizeof(*irq_work), GFP_ATOMIC);
4294         if (!irq_work)
4295                 return;
4296
4297         INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
4298         irq_work->hr_dev = hr_dev;
4299         irq_work->qpn = qpn;
4300         irq_work->cqn = cqn;
4301         irq_work->event_type = eq->event_type;
4302         irq_work->sub_type = eq->sub_type;
4303         queue_work(hr_dev->irq_workq, &(irq_work->work));
4304 }
4305
4306 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
4307 {
4308         u32 doorbell[2];
4309
4310         doorbell[0] = 0;
4311         doorbell[1] = 0;
4312
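        /* doorbell[0] carries the EQ tag and arm command, doorbell[1] the
         * consumer index (masked by HNS_ROCE_V2_CONS_IDX_M); both words
         * are posted together via hns_roce_write64_k() below.
         */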
4313         if (eq->type_flag == HNS_ROCE_AEQ) {
4314                 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4315                                HNS_ROCE_V2_EQ_DB_CMD_S,
4316                                eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4317                                HNS_ROCE_EQ_DB_CMD_AEQ :
4318                                HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
4319         } else {
4320                 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
4321                                HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
4322
4323                 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4324                                HNS_ROCE_V2_EQ_DB_CMD_S,
4325                                eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4326                                HNS_ROCE_EQ_DB_CMD_CEQ :
4327                                HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
4328         }
4329
4330         roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
4331                        HNS_ROCE_V2_EQ_DB_PARA_S,
4332                        (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
4333
4334         hns_roce_write64_k(doorbell, eq->doorbell);
4335 }
4336
4337 static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
4338 {
4339         u32 buf_chk_sz;
4340         unsigned long off;
4341
4342         buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4343         off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4344
4345         return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
4346                 off % buf_chk_sz);
4347 }
4348
4349 static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
4350 {
4351         u32 buf_chk_sz;
4352         unsigned long off;
4353
4354         buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4355
4356         off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4357
4358         if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4359                 return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
4360                         off % buf_chk_sz);
4361         else
4362                 return (struct hns_roce_aeqe *)((u8 *)
4363                         (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
4364 }
4365
4366 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
4367 {
4368         struct hns_roce_aeqe *aeqe;
4369
4370         if (!eq->hop_num)
4371                 aeqe = get_aeqe_v2(eq, eq->cons_index);
4372         else
4373                 aeqe = mhop_get_aeqe(eq, eq->cons_index);
4374
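        /* Usual owner-bit scheme: the EQE is valid for software when its
         * owner bit differs from the wrap parity of the consumer index
         * (eq->cons_index & eq->entries).
         */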
4375         return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
4376                 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
4377 }
4378
4379 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
4380                                struct hns_roce_eq *eq)
4381 {
4382         struct device *dev = hr_dev->dev;
4383         struct hns_roce_aeqe *aeqe;
4384         int aeqe_found = 0;
4385         int event_type;
4386         int sub_type;
4387         u32 qpn;
4388         u32 cqn;
4389
4390         while ((aeqe = next_aeqe_sw_v2(eq))) {
4391
4392                 /* Make sure we read AEQ entry after we have checked the
4393                  * ownership bit
4394                  */
4395                 dma_rmb();
4396
4397                 event_type = roce_get_field(aeqe->asyn,
4398                                             HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
4399                                             HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
4400                 sub_type = roce_get_field(aeqe->asyn,
4401                                           HNS_ROCE_V2_AEQE_SUB_TYPE_M,
4402                                           HNS_ROCE_V2_AEQE_SUB_TYPE_S);
4403                 qpn = roce_get_field(aeqe->event.qp_event.qp,
4404                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
4405                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
4406                 cqn = roce_get_field(aeqe->event.cq_event.cq,
4407                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
4408                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
4409
4410                 switch (event_type) {
4411                 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4412                 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4413                 case HNS_ROCE_EVENT_TYPE_COMM_EST:
4414                 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4415                 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4416                 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4417                 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4418                         hns_roce_qp_event(hr_dev, qpn, event_type);
4419                         break;
4420                 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4421                 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4422                 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4423                         break;
4424                 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4425                 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4426                         hns_roce_cq_event(hr_dev, cqn, event_type);
4427                         break;
4428                 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4429                         break;
4430                 case HNS_ROCE_EVENT_TYPE_MB:
4431                         hns_roce_cmd_event(hr_dev,
4432                                         le16_to_cpu(aeqe->event.cmd.token),
4433                                         aeqe->event.cmd.status,
4434                                         le64_to_cpu(aeqe->event.cmd.out_param));
4435                         break;
4436                 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
4437                         break;
4438                 case HNS_ROCE_EVENT_TYPE_FLR:
4439                         break;
4440                 default:
4441                         dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
4442                                 event_type, eq->eqn, eq->cons_index);
4443                         break;
4444                 }
4445
4446                 eq->event_type = event_type;
4447                 eq->sub_type = sub_type;
4448                 ++eq->cons_index;
4449                 aeqe_found = 1;
4450
4451                 if (eq->cons_index > (2 * eq->entries - 1)) {
4452                         dev_warn(dev, "cons_index overflow, set back to 0.\n");
4453                         eq->cons_index = 0;
4454                 }
4455                 hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
4456         }
4457
4458         set_eq_cons_index_v2(eq);
4459         return aeqe_found;
4460 }
4461
4462 static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
4463 {
4464         u32 buf_chk_sz;
4465         unsigned long off;
4466
4467         buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4468         off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4469
4470         return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
4471                 off % buf_chk_sz);
4472 }
4473
4474 static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
4475 {
4476         u32 buf_chk_sz;
4477         unsigned long off;
4478
4479         buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4480
4481         off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4482
4483         if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4484                 return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
4485                         off % buf_chk_sz);
4486         else
4487                 return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
4488                         buf_chk_sz]) + off % buf_chk_sz);
4489 }
4490
4491 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
4492 {
4493         struct hns_roce_ceqe *ceqe;
4494
4495         if (!eq->hop_num)
4496                 ceqe = get_ceqe_v2(eq, eq->cons_index);
4497         else
4498                 ceqe = mhop_get_ceqe(eq, eq->cons_index);
4499
4500         return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
4501                 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
4502 }
4503
4504 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
4505                                struct hns_roce_eq *eq)
4506 {
4507         struct device *dev = hr_dev->dev;
4508         struct hns_roce_ceqe *ceqe;
4509         int ceqe_found = 0;
4510         u32 cqn;
4511
4512         while ((ceqe = next_ceqe_sw_v2(eq))) {
4513
4514                 /* Make sure we read CEQ entry after we have checked the
4515                  * ownership bit
4516                  */
4517                 dma_rmb();
4518
4519                 cqn = roce_get_field(ceqe->comp,
4520                                      HNS_ROCE_V2_CEQE_COMP_CQN_M,
4521                                      HNS_ROCE_V2_CEQE_COMP_CQN_S);
4522
4523                 hns_roce_cq_completion(hr_dev, cqn);
4524
4525                 ++eq->cons_index;
4526                 ceqe_found = 1;
4527
4528                 if (eq->cons_index > (2 * eq->entries - 1)) {
4529                         dev_warn(dev, "cons_index overflow, set back to 0.\n");
4530                         eq->cons_index = 0;
4531                 }
4532         }
4533
4534         set_eq_cons_index_v2(eq);
4535
4536         return ceqe_found;
4537 }
4538
4539 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
4540 {
4541         struct hns_roce_eq *eq = eq_ptr;
4542         struct hns_roce_dev *hr_dev = eq->hr_dev;
4543         int int_work = 0;
4544
4545         if (eq->type_flag == HNS_ROCE_CEQ)
4546                 /* Completion event interrupt */
4547                 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
4548         else
4549                 /* Asynchronous event interrupt */
4550                 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
4551
4552         return IRQ_RETVAL(int_work);
4553 }
4554
4555 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
4556 {
4557         struct hns_roce_dev *hr_dev = dev_id;
4558         struct device *dev = hr_dev->dev;
4559         int int_work = 0;
4560         u32 int_st;
4561         u32 int_en;
4562
4563         /* Abnormal interrupt */
4564         int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
4565         int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
4566
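        /* Each branch below acknowledges its event by writing the status
         * bit back (apparently write-1-to-clear) and then re-arms the
         * abnormal interrupt enable bit.
         */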
4567         if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
4568                 dev_err(dev, "AEQ overflow!\n");
4569
4570                 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
4571                 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
4572
4573                 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
4574                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
4575
4576                 int_work = 1;
4577         } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
4578                 dev_err(dev, "BUS ERR!\n");
4579
4580                 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
4581                 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
4582
4583                 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
4584                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
4585
4586                 int_work = 1;
4587         } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
4588                 dev_err(dev, "OTHER ERR!\n");
4589
4590                 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
4591                 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
4592
4593                 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
4594                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
4595
4596                 int_work = 1;
4597         } else {
4598                 dev_err(dev, "No abnormal irq found!\n");
4599         }
4600         return IRQ_RETVAL(int_work);
4601 }
4602
4603 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
4604                                         int eq_num, int enable_flag)
4605 {
4606         int i;
4607
4608         if (enable_flag == EQ_ENABLE) {
4609                 for (i = 0; i < eq_num; i++)
4610                         roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
4611                                    i * EQ_REG_OFFSET,
4612                                    HNS_ROCE_V2_VF_EVENT_INT_EN_M);
4613
4614                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
4615                            HNS_ROCE_V2_VF_ABN_INT_EN_M);
4616                 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
4617                            HNS_ROCE_V2_VF_ABN_INT_CFG_M);
4618         } else {
4619                 for (i = 0; i < eq_num; i++)
4620                         roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
4621                                    i * EQ_REG_OFFSET,
4622                                    HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
4623
4624                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
4625                            HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
4626                 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
4627                            HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
4628         }
4629 }
4630
4631 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
4632 {
4633         struct device *dev = hr_dev->dev;
4634         int ret;
4635
4636         if (eqn < hr_dev->caps.num_comp_vectors)
4637                 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
4638                                         0, HNS_ROCE_CMD_DESTROY_CEQC,
4639                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
4640         else
4641                 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
4642                                         0, HNS_ROCE_CMD_DESTROY_AEQC,
4643                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
4644         if (ret)
4645                 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
4646 }
4647
4648 static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
4649                                   struct hns_roce_eq *eq)
4650 {
4651         struct device *dev = hr_dev->dev;
4652         u64 idx;
4653         u64 size;
4654         u32 buf_chk_sz;
4655         u32 bt_chk_sz;
4656         u32 mhop_num;
4657         int eqe_alloc;
4658         int i = 0;
4659         int j = 0;
4660
4661         mhop_num = hr_dev->caps.eqe_hop_num;
4662         buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4663         bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
4664
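        /* Layout recap, mirroring the allocation path: hop_num 0 keeps the
         * whole EQE buffer behind bt_l0; hop_num 1 makes bt_l0 a table of
         * buffer addresses; hop_num 2 interposes bt_l1[] tables between
         * bt_l0 and the buffers.
         */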
4665         /* hop_num = 0 */
4666         if (mhop_num == HNS_ROCE_HOP_NUM_0) {
4667                 dma_free_coherent(dev, (unsigned int)(eq->entries *
4668                                   eq->eqe_size), eq->bt_l0, eq->l0_dma);
4669                 return;
4670         }
4671
4672         /* hop_num = 1 or hop_num = 2 */
4673         dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4674         if (mhop_num == 1) {
4675                 for (i = 0; i < eq->l0_last_num; i++) {
4676                         if (i == eq->l0_last_num - 1) {
4677                                 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
4678                                 size = (eq->entries - eqe_alloc) * eq->eqe_size;
4679                                 dma_free_coherent(dev, size, eq->buf[i],
4680                                                   eq->buf_dma[i]);
4681                                 break;
4682                         }
4683                         dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
4684                                           eq->buf_dma[i]);
4685                 }
4686         } else if (mhop_num == 2) {
4687                 for (i = 0; i < eq->l0_last_num; i++) {
4688                         dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4689                                           eq->l1_dma[i]);
4690
4691                         for (j = 0; j < bt_chk_sz / 8; j++) {
4692                                 idx = i * (bt_chk_sz / 8) + j;
4693                                 if ((i == eq->l0_last_num - 1)
4694                                      && j == eq->l1_last_num - 1) {
4695                                         eqe_alloc = (buf_chk_sz / eq->eqe_size)
4696                                                     * idx;
4697                                         size = (eq->entries - eqe_alloc)
4698                                                 * eq->eqe_size;
4699                                         dma_free_coherent(dev, size,
4700                                                           eq->buf[idx],
4701                                                           eq->buf_dma[idx]);
4702                                         break;
4703                                 }
4704                                 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
4705                                                   eq->buf_dma[idx]);
4706                         }
4707                 }
4708         }
4709         kfree(eq->buf_dma);
4710         kfree(eq->buf);
4711         kfree(eq->l1_dma);
4712         kfree(eq->bt_l1);
4713         eq->buf_dma = NULL;
4714         eq->buf = NULL;
4715         eq->l1_dma = NULL;
4716         eq->bt_l1 = NULL;
4717 }
4718
4719 static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
4720                                 struct hns_roce_eq *eq)
4721 {
4722         u32 buf_chk_sz;
4723
4724         buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4725
4726         if (hr_dev->caps.eqe_hop_num) {
4727                 hns_roce_mhop_free_eq(hr_dev, eq);
4728                 return;
4729         }
4730
4731         if (eq->buf_list)
4732                 dma_free_coherent(hr_dev->dev, buf_chk_sz,
4733                                   eq->buf_list->buf, eq->buf_list->map);
4734 }
4735
4736 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
4737                                 struct hns_roce_eq *eq,
4738                                 void *mb_buf)
4739 {
4740         struct hns_roce_eq_context *eqc;
4741
4742         eqc = mb_buf;
4743         memset(eqc, 0, sizeof(*eqc));
4744
4745         /* init eqc */
4746         eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
4747         eq->hop_num = hr_dev->caps.eqe_hop_num;
4748         eq->cons_index = 0;
4749         eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
4750         eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
4751         eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
4752         eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
4753         eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
4754         eq->shift = ilog2((unsigned int)eq->entries);
4755
4756         if (!eq->hop_num)
4757                 eq->eqe_ba = eq->buf_list->map;
4758         else
4759                 eq->eqe_ba = eq->l0_dma;
4760
4761         /* set eqc state */
4762         roce_set_field(eqc->byte_4,
4763                        HNS_ROCE_EQC_EQ_ST_M,
4764                        HNS_ROCE_EQC_EQ_ST_S,
4765                        HNS_ROCE_V2_EQ_STATE_VALID);
4766
4767         /* set eqe hop num */
4768         roce_set_field(eqc->byte_4,
4769                        HNS_ROCE_EQC_HOP_NUM_M,
4770                        HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
4771
4772         /* set eqc over_ignore */
4773         roce_set_field(eqc->byte_4,
4774                        HNS_ROCE_EQC_OVER_IGNORE_M,
4775                        HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
4776
4777         /* set eqc coalesce */
4778         roce_set_field(eqc->byte_4,
4779                        HNS_ROCE_EQC_COALESCE_M,
4780                        HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
4781
4782         /* set eqc arm_state */
4783         roce_set_field(eqc->byte_4,
4784                        HNS_ROCE_EQC_ARM_ST_M,
4785                        HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
4786
4787         /* set eqn */
4788         roce_set_field(eqc->byte_4,
4789                        HNS_ROCE_EQC_EQN_M,
4790                        HNS_ROCE_EQC_EQN_S, eq->eqn);
4791
4792         /* set eqe_cnt */
4793         roce_set_field(eqc->byte_4,
4794                        HNS_ROCE_EQC_EQE_CNT_M,
4795                        HNS_ROCE_EQC_EQE_CNT_S,
4796                        HNS_ROCE_EQ_INIT_EQE_CNT);
4797
4798         /* set eqe_ba_pg_sz */
4799         roce_set_field(eqc->byte_8,
4800                        HNS_ROCE_EQC_BA_PG_SZ_M,
4801                        HNS_ROCE_EQC_BA_PG_SZ_S,
4802                        eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
4803
4804         /* set eqe_buf_pg_sz */
4805         roce_set_field(eqc->byte_8,
4806                        HNS_ROCE_EQC_BUF_PG_SZ_M,
4807                        HNS_ROCE_EQC_BUF_PG_SZ_S,
4808                        eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
4809
4810         /* set eq_producer_idx */
4811         roce_set_field(eqc->byte_8,
4812                        HNS_ROCE_EQC_PROD_INDX_M,
4813                        HNS_ROCE_EQC_PROD_INDX_S,
4814                        HNS_ROCE_EQ_INIT_PROD_IDX);
4815
4816         /* set eq_max_cnt */
4817         roce_set_field(eqc->byte_12,
4818                        HNS_ROCE_EQC_MAX_CNT_M,
4819                        HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
4820
4821         /* set eq_period */
4822         roce_set_field(eqc->byte_12,
4823                        HNS_ROCE_EQC_PERIOD_M,
4824                        HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
4825
4826         /* set eqe_report_timer */
4827         roce_set_field(eqc->eqe_report_timer,
4828                        HNS_ROCE_EQC_REPORT_TIMER_M,
4829                        HNS_ROCE_EQC_REPORT_TIMER_S,
4830                        HNS_ROCE_EQ_INIT_REPORT_TIMER);
4831
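        /*
         * The 64-bit eqe_ba does not fit in one context word, so only
         * bits [63:3] are stored (the base is assumed 8-byte aligned):
         * eqe_ba >> 3 provides bits [34:3] for the low field and
         * eqe_ba >> 35 provides bits [63:35] for the high field; each
         * roce_set_field() masks its value to the field width.
         */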
4832         /* set eqe_ba [34:3] */
4833         roce_set_field(eqc->eqe_ba0,
4834                        HNS_ROCE_EQC_EQE_BA_L_M,
4835                        HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
4836
4837         /* set eqe_ba [63:35] */
4838         roce_set_field(eqc->eqe_ba1,
4839                        HNS_ROCE_EQC_EQE_BA_H_M,
4840                        HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
4841
4842         /* set eq shift */
4843         roce_set_field(eqc->byte_28,
4844                        HNS_ROCE_EQC_SHIFT_M,
4845                        HNS_ROCE_EQC_SHIFT_S, eq->shift);
4846
4847         /* set eq MSI_IDX */
4848         roce_set_field(eqc->byte_28,
4849                        HNS_ROCE_EQC_MSI_INDX_M,
4850                        HNS_ROCE_EQC_MSI_INDX_S,
4851                        HNS_ROCE_EQ_INIT_MSI_IDX);
4852
4853         /* set cur_eqe_ba [27:12] */
4854         roce_set_field(eqc->byte_28,
4855                        HNS_ROCE_EQC_CUR_EQE_BA_L_M,
4856                        HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
4857
4858         /* set cur_eqe_ba [59:28] */
4859         roce_set_field(eqc->byte_32,
4860                        HNS_ROCE_EQC_CUR_EQE_BA_M_M,
4861                        HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
4862
4863         /* set cur_eqe_ba [63:60] */
4864         roce_set_field(eqc->byte_36,
4865                        HNS_ROCE_EQC_CUR_EQE_BA_H_M,
4866                        HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
4867
4868         /* set eq consumer idx */
4869         roce_set_field(eqc->byte_36,
4870                        HNS_ROCE_EQC_CONS_INDX_M,
4871                        HNS_ROCE_EQC_CONS_INDX_S,
4872                        HNS_ROCE_EQ_INIT_CONS_IDX);
4873
4874         /* set nxt_eqe_ba [43:12] */
4875         roce_set_field(eqc->nxt_eqe_ba0,
4876                        HNS_ROCE_EQC_NXT_EQE_BA_L_M,
4877                        HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
4878
4879         /* set nxt_eqe_ba [63:44] */
4880         roce_set_field(eqc->nxt_eqe_ba1,
4881                        HNS_ROCE_EQC_NXT_EQE_BA_H_M,
4882                        HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
4883 }
4884
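/*
 * Multi-hop EQE buffer layout: every base address table (BT) entry is an
 * 8-byte DMA address, so one bt_chk_sz-byte BT holds bt_chk_sz / 8
 * entries. With one hop, the L0 BT points directly at the EQE buffer
 * chunks; with two hops, the L0 BT points at L1 BTs, which in turn point
 * at the buffer chunks. Below, ba_num is the number of buffer chunks
 * needed for eq->entries EQEs and bt_num the number of L1 BTs needed to
 * reference them.
 */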
4885 static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
4886                                   struct hns_roce_eq *eq)
4887 {
4888         struct device *dev = hr_dev->dev;
4889         int eq_alloc_done = 0;
4890         int eq_buf_cnt = 0;
4891         int eqe_alloc;
4892         u32 buf_chk_sz;
4893         u32 bt_chk_sz;
4894         u32 mhop_num;
4895         u64 size;
4896         u64 idx;
4897         int ba_num;
4898         int bt_num;
4899         int record_i;
4900         int record_j;
4901         int i = 0;
4902         int j = 0;
4903
4904         mhop_num = hr_dev->caps.eqe_hop_num;
4905         buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4906         bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
4907
4908         ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
4909                   / buf_chk_sz;
4910         bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
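        /*
         * Sizing sketch (assuming 4 KB pages and both page size
         * exponents zero): buf_chk_sz = bt_chk_sz = 4096. An EQ of
         * 4096 entries with 64-byte EQEs needs 256 KB of buffer,
         * i.e. ba_num = 64 chunks; a single 512-entry BT covers
         * them, so bt_num = 1.
         */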
4911
4912         /* hop_num = 0: bt_l0 is reused as the single contiguous EQE buffer */
4913         if (mhop_num == HNS_ROCE_HOP_NUM_0) {
4914                 if (eq->entries > buf_chk_sz / eq->eqe_size) {
4915                         dev_err(dev, "eq entries %d exceed buf_pg_sz!\n",
4916                                 eq->entries);
4917                         return -EINVAL;
4918                 }
4919                 eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
4920                                                &(eq->l0_dma), GFP_KERNEL);
4921                 if (!eq->bt_l0)
4922                         return -ENOMEM;
4923
4924                 eq->cur_eqe_ba = eq->l0_dma;
4925                 eq->nxt_eqe_ba = 0;
4926
4927                 memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);
4928
4929                 return 0;
4930         }
4931
4932         eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
4933         if (!eq->buf_dma)
4934                 return -ENOMEM;
4935         eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
4936         if (!eq->buf)
4937                 goto err_kcalloc_buf;
4938
4939         if (mhop_num == 2) {
4940                 eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
4941                 if (!eq->l1_dma)
4942                         goto err_kcalloc_l1_dma;
4943
4944                 eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
4945                 if (!eq->bt_l1)
4946                         goto err_kcalloc_bt_l1;
4947         }
4948
4949         /* alloc L0 BT */
4950         eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
4951         if (!eq->bt_l0)
4952                 goto err_dma_alloc_l0;
4953
4954         if (mhop_num == 1) {
4955                 if (ba_num > (bt_chk_sz / 8))
4956                         dev_err(dev, "ba_num %d is too large for 1 hop\n",
4957                                 ba_num);
4958
4959                 /* alloc buf */
4960                 for (i = 0; i < bt_chk_sz / 8; i++) {
4961                         if (eq_buf_cnt + 1 < ba_num) {
4962                                 size = buf_chk_sz;
4963                         } else {
4964                                 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
4965                                 size = (eq->entries - eqe_alloc) * eq->eqe_size;
4966                         }
4967                         eq->buf[i] = dma_alloc_coherent(dev, size,
4968                                                         &(eq->buf_dma[i]),
4969                                                         GFP_KERNEL);
4970                         if (!eq->buf[i])
4971                                 goto err_dma_alloc_buf;
4972
4973                         memset(eq->buf[i], 0, size);
4974                         *(eq->bt_l0 + i) = eq->buf_dma[i];
4975
4976                         eq_buf_cnt++;
4977                         if (eq_buf_cnt >= ba_num)
4978                                 break;
4979                 }
4980                 eq->cur_eqe_ba = eq->buf_dma[0];
4981                 eq->nxt_eqe_ba = ba_num > 1 ? eq->buf_dma[1] : 0;
4982
4983         } else if (mhop_num == 2) {
4984                 /* alloc L1 BT and buf */
4985                 for (i = 0; i < bt_chk_sz / 8; i++) {
4986                         eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
4987                                                           &(eq->l1_dma[i]),
4988                                                           GFP_KERNEL);
4989                         if (!eq->bt_l1[i])
4990                                 goto err_dma_alloc_l1;
4991                         *(eq->bt_l0 + i) = eq->l1_dma[i];
4992
4993                         for (j = 0; j < bt_chk_sz / 8; j++) {
4994                                 idx = i * bt_chk_sz / 8 + j;
4995                                 if (eq_buf_cnt + 1 < ba_num) {
4996                                         size = buf_chk_sz;
4997                                 } else {
4998                                         eqe_alloc = (buf_chk_sz / eq->eqe_size)
4999                                                     * idx;
5000                                         size = (eq->entries - eqe_alloc)
5001                                                 * eq->eqe_size;
5002                                 }
5003                                 eq->buf[idx] = dma_alloc_coherent(dev, size,
5004                                                             &(eq->buf_dma[idx]),
5005                                                             GFP_KERNEL);
5006                                 if (!eq->buf[idx])
5007                                         goto err_dma_alloc_buf;
5008
5009                                 memset(eq->buf[idx], 0, size);
5010                                 *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
5011
5012                                 eq_buf_cnt++;
5013                                 if (eq_buf_cnt >= ba_num) {
5014                                         eq_alloc_done = 1;
5015                                         break;
5016                                 }
5017                         }
5018
5019                         if (eq_alloc_done)
5020                                 break;
5021                 }
5022                 eq->cur_eqe_ba = eq->buf_dma[0];
5023                 eq->nxt_eqe_ba = ba_num > 1 ? eq->buf_dma[1] : 0;
5024         }
5025
5026         eq->l0_last_num = i + 1;
5027         if (mhop_num == 2)
5028                 eq->l1_last_num = j + 1;
5029
5030         return 0;
5031
5032 err_dma_alloc_l1:
5033         dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5034         eq->bt_l0 = NULL;
5035         eq->l0_dma = 0;
5036         for (i -= 1; i >= 0; i--) {
5037                 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5038                                   eq->l1_dma[i]);
5039
5040                 for (j = 0; j < bt_chk_sz / 8; j++) {
5041                         idx = i * bt_chk_sz / 8 + j;
5042                         dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5043                                           eq->buf_dma[idx]);
5044                 }
5045         }
5046         goto err_dma_alloc_l0;
5047
5048 err_dma_alloc_buf:
5049         dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5050         eq->bt_l0 = NULL;
5051         eq->l0_dma = 0;
5052
5053         if (mhop_num == 1)
5054                 for (i -= 1; i >= 0; i--)
5055                         dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5056                                           eq->buf_dma[i]);
5057         else if (mhop_num == 2) {
5058                 record_i = i;
5059                 record_j = j;
5060                 for (; i >= 0; i--) {
5061                         dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5062                                           eq->l1_dma[i]);
5063
5064                         for (j = 0; j < bt_chk_sz / 8; j++) {
5065                                 if (i == record_i && j >= record_j)
5066                                         break;
5067
5068                                 idx = i * bt_chk_sz / 8 + j;
5069                                 dma_free_coherent(dev, buf_chk_sz,
5070                                                   eq->buf[idx],
5071                                                   eq->buf_dma[idx]);
5072                         }
5073                 }
5074         }
5075
5076 err_dma_alloc_l0:
5077         kfree(eq->bt_l1);
5078         eq->bt_l1 = NULL;
5079
5080 err_kcalloc_bt_l1:
5081         kfree(eq->l1_dma);
5082         eq->l1_dma = NULL;
5083
5084 err_kcalloc_l1_dma:
5085         kfree(eq->buf);
5086         eq->buf = NULL;
5087
5088 err_kcalloc_buf:
5089         kfree(eq->buf_dma);
5090         eq->buf_dma = NULL;
5091
5092         return -ENOMEM;
5093 }
5094
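/*
 * Creating an EQ is a two-step handshake with the hardware: the driver
 * first allocates the EQE buffer (one contiguous chunk when eqe_hop_num
 * is 0, otherwise the multi-hop layout above), then fills an EQ context
 * into a mailbox and posts the CREATE_CEQC/CREATE_AEQC command so the
 * hardware takes ownership of the queue.
 */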
5095 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5096                                  struct hns_roce_eq *eq,
5097                                  unsigned int eq_cmd)
5098 {
5099         struct device *dev = hr_dev->dev;
5100         struct hns_roce_cmd_mailbox *mailbox;
5101         u32 buf_chk_sz = 0;
5102         int ret;
5103
5104         /* Allocate mailbox memory */
5105         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5106         if (IS_ERR(mailbox))
5107                 return PTR_ERR(mailbox);
5108
5109         if (!hr_dev->caps.eqe_hop_num) {
5110                 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5111
5112                 eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
5113                                        GFP_KERNEL);
5114                 if (!eq->buf_list) {
5115                         ret = -ENOMEM;
5116                         goto free_cmd_mbox;
5117                 }
5118
5119                 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
5120                                                        &(eq->buf_list->map),
5121                                                        GFP_KERNEL);
5122                 if (!eq->buf_list->buf) {
5123                         ret = -ENOMEM;
5124                         goto err_alloc_buf;
5125                 }
5126
5127                 memset(eq->buf_list->buf, 0, buf_chk_sz);
5128         } else {
5129                 ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
5130                 if (ret)
5131                         goto free_cmd_mbox;
5134         }
5135
5136         hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
5137
5138         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5139                                 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5140         if (ret) {
5141                 dev_err(dev, "[mailbox cmd] create eqc failed.\n");
5142                 goto err_cmd_mbox;
5143         }
5144
5145         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5146
5147         return 0;
5148
5149 err_cmd_mbox:
5150         if (!hr_dev->caps.eqe_hop_num)
5151                 dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
5152                                   eq->buf_list->map);
5153         else {
5154                 hns_roce_mhop_free_eq(hr_dev, eq);
5155                 goto free_cmd_mbox;
5156         }
5157
5158 err_alloc_buf:
5159         kfree(eq->buf_list);
5160
5161 free_cmd_mbox:
5162         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5163
5164         return ret;
5165 }
5166
5167 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
5168 {
5169         struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5170         struct device *dev = hr_dev->dev;
5171         struct hns_roce_eq *eq;
5172         unsigned int eq_cmd;
5173         int irq_num;
5174         int eq_num;
5175         int other_num;
5176         int comp_num;
5177         int aeq_num;
5178         int i, j, k;
5179         int ret;
5180
5181         other_num = hr_dev->caps.num_other_vectors;
5182         comp_num = hr_dev->caps.num_comp_vectors;
5183         aeq_num = hr_dev->caps.num_aeq_vectors;
5184
5185         eq_num = comp_num + aeq_num;
5186         irq_num = eq_num + other_num;
5187
5188         eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
5189         if (!eq_table->eq)
5190                 return -ENOMEM;
5191
5192         for (i = 0; i < irq_num; i++) {
5193                 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
5194                                                GFP_KERNEL);
5195                 if (!hr_dev->irq_names[i]) {
5196                         ret = -ENOMEM;
5197                         goto err_failed_kzalloc;
5198                 }
5199         }
5200
5201         /* create eq */
5202         for (j = 0; j < eq_num; j++) {
5203                 eq = &eq_table->eq[j];
5204                 eq->hr_dev = hr_dev;
5205                 eq->eqn = j;
5206                 if (j < comp_num) {
5207                         /* CEQ */
5208                         eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
5209                         eq->type_flag = HNS_ROCE_CEQ;
5210                         eq->entries = hr_dev->caps.ceqe_depth;
5211                         eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
5212                         eq->irq = hr_dev->irq[j + other_num + aeq_num];
5213                         eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
5214                         eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
5215                 } else {
5216                         /* AEQ */
5217                         eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
5218                         eq->type_flag = HNS_ROCE_AEQ;
5219                         eq->entries = hr_dev->caps.aeqe_depth;
5220                         eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
5221                         eq->irq = hr_dev->irq[j - comp_num + other_num];
5222                         eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
5223                         eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
5224                 }
5225
5226                 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
5227                 if (ret) {
5228                         dev_err(dev, "eq create failed.\n");
5229                         goto err_create_eq_fail;
5230                 }
5231         }
5232
5233         /* enable irq */
5234         hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
5235
5236         /* irq contains: abnormal + AEQ + CEQ */
5237         for (k = 0; k < irq_num; k++)
5238                 if (k < other_num)
5239                         snprintf((char *)hr_dev->irq_names[k],
5240                                  HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
5241                 else if (k < (other_num + aeq_num))
5242                         snprintf((char *)hr_dev->irq_names[k],
5243                                  HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
5244                                  k - other_num);
5245                 else
5246                         snprintf((char *)hr_dev->irq_names[k],
5247                                  HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
5248                                  k - other_num - aeq_num);
5249
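        /*
         * Note the index arithmetic below: eq_table->eq[] stores CEQs at
         * [0, comp_num) and AEQs at [comp_num, eq_num), while irq_names[]
         * was filled above in abnormal/AEQ/CEQ order. A CEQ at
         * eq[k - other_num] therefore takes its name from
         * irq_names[k + aeq_num], and an AEQ takes its name from
         * irq_names[k - comp_num].
         */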
5250         for (k = 0; k < irq_num; k++) {
5251                 if (k < other_num)
5252                         ret = request_irq(hr_dev->irq[k],
5253                                           hns_roce_v2_msix_interrupt_abn,
5254                                           0, hr_dev->irq_names[k], hr_dev);
5255
5256                 else if (k < (other_num + comp_num))
5257                         ret = request_irq(eq_table->eq[k - other_num].irq,
5258                                           hns_roce_v2_msix_interrupt_eq,
5259                                           0, hr_dev->irq_names[k + aeq_num],
5260                                           &eq_table->eq[k - other_num]);
5261                 else
5262                         ret = request_irq(eq_table->eq[k - other_num].irq,
5263                                           hns_roce_v2_msix_interrupt_eq,
5264                                           0, hr_dev->irq_names[k - comp_num],
5265                                           &eq_table->eq[k - other_num]);
5266                 if (ret) {
5267                         dev_err(dev, "request_irq failed!\n");
5268                         goto err_request_irq_fail;
5269                 }
5270         }
5271
5272         hr_dev->irq_workq =
5273                 create_singlethread_workqueue("hns_roce_irq_workqueue");
5274         if (!hr_dev->irq_workq) {
5275                 dev_err(dev, "Create irq workqueue failed!\n");
5276                 ret = -ENOMEM;
5277                 goto err_request_irq_fail;
5278         }
5279
5280         return 0;
5281
5282 err_request_irq_fail:
5283         for (k -= 1; k >= 0; k--)
5284                 if (k < other_num)
5285                         free_irq(hr_dev->irq[k], hr_dev);
5286                 else
5287                         free_irq(eq_table->eq[k - other_num].irq,
5288                                  &eq_table->eq[k - other_num]);
5289
5290 err_create_eq_fail:
5291         for (j -= 1; j >= 0; j--)
5292                 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
5293
5294 err_failed_kzalloc:
5295         for (i -= 1; i >= 0; i--)
5296                 kfree(hr_dev->irq_names[i]);
5297         kfree(eq_table->eq);
5298
5299         return ret;
5300 }
5301
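/*
 * Teardown mirrors hns_roce_v2_init_eq_table() in reverse: mask the EQ
 * interrupts, release the abnormal-event irqs, then for each EQ destroy
 * its context in hardware before releasing its irq and buffers, and
 * finally drain and destroy the irq workqueue.
 */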
5302 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
5303 {
5304         struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5305         int irq_num;
5306         int eq_num;
5307         int i;
5308
5309         eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
5310         irq_num = eq_num + hr_dev->caps.num_other_vectors;
5311
5312         /* Disable irq */
5313         hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
5314
5315         for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
5316                 free_irq(hr_dev->irq[i], hr_dev);
5317
5318         for (i = 0; i < eq_num; i++) {
5319                 hns_roce_v2_destroy_eqc(hr_dev, i);
5320
5321                 free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
5322
5323                 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
5324         }
5325
5326         for (i = 0; i < irq_num; i++)
5327                 kfree(hr_dev->irq_names[i]);
5328
5329         kfree(eq_table->eq);
5330
5331         flush_workqueue(hr_dev->irq_workq);
5332         destroy_workqueue(hr_dev->irq_workq);
5333 }
5334
5335 static const struct hns_roce_hw hns_roce_hw_v2 = {
5336         .cmq_init = hns_roce_v2_cmq_init,
5337         .cmq_exit = hns_roce_v2_cmq_exit,
5338         .hw_profile = hns_roce_v2_profile,
5339         .hw_init = hns_roce_v2_init,
5340         .hw_exit = hns_roce_v2_exit,
5341         .post_mbox = hns_roce_v2_post_mbox,
5342         .chk_mbox = hns_roce_v2_chk_mbox,
5343         .set_gid = hns_roce_v2_set_gid,
5344         .set_mac = hns_roce_v2_set_mac,
5345         .write_mtpt = hns_roce_v2_write_mtpt,
5346         .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
5347         .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
5348         .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
5349         .write_cqc = hns_roce_v2_write_cqc,
5350         .set_hem = hns_roce_v2_set_hem,
5351         .clear_hem = hns_roce_v2_clear_hem,
5352         .modify_qp = hns_roce_v2_modify_qp,
5353         .query_qp = hns_roce_v2_query_qp,
5354         .destroy_qp = hns_roce_v2_destroy_qp,
5355         .modify_cq = hns_roce_v2_modify_cq,
5356         .post_send = hns_roce_v2_post_send,
5357         .post_recv = hns_roce_v2_post_recv,
5358         .req_notify_cq = hns_roce_v2_req_notify_cq,
5359         .poll_cq = hns_roce_v2_poll_cq,
5360         .init_eq = hns_roce_v2_init_eq_table,
5361         .cleanup_eq = hns_roce_v2_cleanup_eq_table,
5362 };
5363
5364 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
5365         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
5366         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
5367         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
5368         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
5369         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
5370         /* required last entry */
5371         {0, }
5372 };
5373
5374 MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
5375
5376 static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
5377                                   struct hnae3_handle *handle)
5378 {
5379         const struct pci_device_id *id;
5380         int i;
5381
5382         id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
5383         if (!id) {
5384                 dev_err(hr_dev->dev, "device is not compatible!\n");
5385                 return -ENXIO;
5386         }
5387
5388         hr_dev->hw = &hns_roce_hw_v2;
5389         hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
5390         hr_dev->odb_offset = hr_dev->sdb_offset;
5391
5392         /* Get info from NIC driver. */
5393         hr_dev->reg_base = handle->rinfo.roce_io_base;
5394         hr_dev->caps.num_ports = 1;
5395         hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
5396         hr_dev->iboe.phy_port[0] = 0;
5397
5398         addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
5399                             hr_dev->iboe.netdevs[0]->dev_addr);
5400
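        /* RoCE shares the PCI function's MSI-X vectors with the NIC;
         * rinfo.base_vector is the first vector the NIC driver set
         * aside for RoCE use.
         */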
5401         for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
5402                 hr_dev->irq[i] = pci_irq_vector(handle->pdev,
5403                                                 i + handle->rinfo.base_vector);
5404
5405         /* cmd issue mode: 0 is poll, 1 is event */
5406         hr_dev->cmd_mod = 1;
5407         hr_dev->loop_idc = 0;
5408
5409         return 0;
5410 }
5411
5412 static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
5413 {
5414         struct hns_roce_dev *hr_dev;
5415         int ret;
5416
5417         hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
5418         if (!hr_dev)
5419                 return -ENOMEM;
5420
5421         hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
5422         if (!hr_dev->priv) {
5423                 ret = -ENOMEM;
5424                 goto error_failed_kzalloc;
5425         }
5426
5427         hr_dev->pci_dev = handle->pdev;
5428         hr_dev->dev = &handle->pdev->dev;
5429         handle->priv = hr_dev;
5430
5431         ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
5432         if (ret) {
5433                 dev_err(hr_dev->dev, "Get Configuration failed!\n");
5434                 goto error_failed_get_cfg;
5435         }
5436
5437         ret = hns_roce_init(hr_dev);
5438         if (ret) {
5439                 dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
5440                 goto error_failed_get_cfg;
5441         }
5442
5443         return 0;
5444
5445 error_failed_get_cfg:
5446         kfree(hr_dev->priv);
5447
5448 error_failed_kzalloc:
5449         ib_dealloc_device(&hr_dev->ib_dev);
5450
5451         return ret;
5452 }
5453
5454 static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
5455                                            bool reset)
5456 {
5457         struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
5458
5459         if (!hr_dev)
5460                 return;
5461
5462         hns_roce_exit(hr_dev);
5463         kfree(hr_dev->priv);
5464         ib_dealloc_device(&hr_dev->ib_dev);
5465 }
5466
5467 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
5468 {
5469         struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
5470         struct ib_event event;
5471
5472         if (!hr_dev) {
5473                 dev_err(&handle->pdev->dev,
5474                         "Input parameter handle->priv is NULL!\n");
5475                 return -EINVAL;
5476         }
5477
5478         hr_dev->active = false;
5479         hr_dev->is_reset = true;
5480
5481         event.event = IB_EVENT_DEVICE_FATAL;
5482         event.device = &hr_dev->ib_dev;
5483         event.element.port_num = 1;
5484         ib_dispatch_event(&event);
5485
5486         return 0;
5487 }
5488
5489 static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
5490 {
5491         int ret;
5492
5493         ret = hns_roce_hw_v2_init_instance(handle);
5494         if (ret) {
5495                 /* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE
5496                  * engine is reinitialized in this callback. If the reinit
5497                  * fails, the NIC driver must be informed.
5498                  */
5499                 handle->priv = NULL;
5500                 dev_err(&handle->pdev->dev,
5501                         "In reset process RoCE reinit failed %d.\n", ret);
5502         }
5503
5504         return ret;
5505 }
5506
5507 static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
5508 {
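        /* The 100 ms delay before teardown appears to be an empirical
         * grace period for the hardware reset to settle; no symbolic
         * constant is defined for it.
         */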
5509         msleep(100);
5510         hns_roce_hw_v2_uninit_instance(handle, false);
5511         return 0;
5512 }
5513
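/*
 * hnae3 reset sequence as seen by this client: HNAE3_DOWN_CLIENT fences
 * the device and reports IB_EVENT_DEVICE_FATAL to consumers,
 * HNAE3_UNINIT_CLIENT tears the instance down, and HNAE3_INIT_CLIENT
 * rebuilds it once the reset has completed.
 */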
5514 static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
5515                                        enum hnae3_reset_notify_type type)
5516 {
5517         int ret = 0;
5518
5519         switch (type) {
5520         case HNAE3_DOWN_CLIENT:
5521                 ret = hns_roce_hw_v2_reset_notify_down(handle);
5522                 break;
5523         case HNAE3_INIT_CLIENT:
5524                 ret = hns_roce_hw_v2_reset_notify_init(handle);
5525                 break;
5526         case HNAE3_UNINIT_CLIENT:
5527                 ret = hns_roce_hw_v2_reset_notify_uninit(handle);
5528                 break;
5529         default:
5530                 break;
5531         }
5532
5533         return ret;
5534 }
5535
5536 static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
5537         .init_instance = hns_roce_hw_v2_init_instance,
5538         .uninit_instance = hns_roce_hw_v2_uninit_instance,
5539         .reset_notify = hns_roce_hw_v2_reset_notify,
5540 };
5541
5542 static struct hnae3_client hns_roce_hw_v2_client = {
5543         .name = "hns_roce_hw_v2",
5544         .type = HNAE3_CLIENT_ROCE,
5545         .ops = &hns_roce_hw_v2_ops,
5546 };
5547
5548 static int __init hns_roce_hw_v2_init(void)
5549 {
5550         return hnae3_register_client(&hns_roce_hw_v2_client);
5551 }
5552
5553 static void __exit hns_roce_hw_v2_exit(void)
5554 {
5555         hnae3_unregister_client(&hns_roce_hw_v2_client);
5556 }
5557
5558 module_init(hns_roce_hw_v2_init);
5559 module_exit(hns_roce_hw_v2_exit);
5560
5561 MODULE_LICENSE("Dual BSD/GPL");
5562 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
5563 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
5564 MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
5565 MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");