/* drivers/infiniband/hw/hns/hns_roce_device.h */
1 /*
2  * Copyright (c) 2016 Hisilicon Limited.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #ifndef _HNS_ROCE_DEVICE_H
34 #define _HNS_ROCE_DEVICE_H
35
36 #include <rdma/ib_verbs.h>
37
#define DRV_NAME "hns_roce"

/* hip08 is a PCI device; it comes in two versions, told apart by PCI revision id */
#define PCI_REVISION_ID_HIP08_A                 0x20
#define PCI_REVISION_ID_HIP08_B                 0x21

/* "hi06" packed into a 32-bit hardware-version tag for the v1 engine */
#define HNS_ROCE_HW_VER1        ('h' << 24 | 'i' << 16 | '0' << 8 | '6')

#define HNS_ROCE_MAX_MSG_LEN                    0x80000000

#define HNS_ROCE_IB_MIN_SQ_STRIDE               6

#define HNS_ROCE_BA_SIZE                        (32 * 4096)

/* size in bytes of one base-address table entry */
#define BA_BYTE_LEN                             8

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM                    0x40
#define HNS_ROCE_MIN_WQE_NUM                    0x20

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM             0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM               0x100000
#define HNS_ROCE_MAX_SGE_NUM                    2

/* Poll up to 5000 ms, in 20 ms steps, for a CQ to become free */
#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS        20
#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT   \
        (5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT             0x2
#define HNS_ROCE_MIN_CQE_CNT                    16

#define HNS_ROCE_MAX_IRQ_NUM                    128

#define HNS_ROCE_SGE_IN_WQE                     2
#define HNS_ROCE_SGE_SHIFT                      4

#define EQ_ENABLE                               1
#define EQ_DISABLE                              0

/* event queue type: completion vs. asynchronous */
#define HNS_ROCE_CEQ                            0
#define HNS_ROCE_AEQ                            1

/* entry sizes in bytes for the two EQ types */
#define HNS_ROCE_CEQ_ENTRY_SIZE                 0x4
#define HNS_ROCE_AEQ_ENTRY_SIZE                 0x10

#define HNS_ROCE_SL_SHIFT                       28
#define HNS_ROCE_TCLASS_SHIFT                   20
#define HNS_ROCE_FLOW_LABEL_MASK                0xfffff

#define HNS_ROCE_MAX_PORTS                      6
#define HNS_ROCE_MAX_GID_NUM                    16
#define HNS_ROCE_GID_SIZE                       16
#define HNS_ROCE_SGE_SIZE                       16

/* hop-num value meaning "0 hops" (direct addressing) in multi-hop configs */
#define HNS_ROCE_HOP_NUM_0                      0xff

/* bitmap allocation policy: with / without round-robin */
#define BITMAP_NO_RR                            0
#define BITMAP_RR                               1

/* memory region types (see hns_roce_mr.type) */
#define MR_TYPE_MR                              0x00
#define MR_TYPE_FRMR                            0x01
#define MR_TYPE_DMA                             0x03

#define HNS_ROCE_FRMR_MAX_PA                    512

#define PKEY_ID                                 0xffff
#define GUID_LEN                                8
#define NODE_DESC_SIZE                          64
#define DB_REG_OFFSET                           0x1000

/* Configure to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET                         (PAGE_SHIFT - 12)

#define PAGES_SHIFT_8                           8
#define PAGES_SHIFT_16                          16
#define PAGES_SHIFT_24                          24
#define PAGES_SHIFT_32                          32

#define HNS_ROCE_PCI_BAR_NUM                    2

#define HNS_ROCE_IDX_QUE_ENTRY_SZ               4
#define SRQ_DB_REG                              0x230

/* The chip implementation of the consumer index is calculated
 * according to twice the actual EQ depth
 */
#define EQ_DEPTH_COEFF                          2
125
/* QP transport service types */
enum {
        SERV_TYPE_RC,   /* reliable connection */
        SERV_TYPE_UC,   /* unreliable connection */
        SERV_TYPE_RD,   /* reliable datagram */
        SERV_TYPE_UD,   /* unreliable datagram */
};
132
/* per-QP record-doorbell capability flags */
enum {
        HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
        HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
};

/* per-CQ record-doorbell capability flag */
enum {
        HNS_ROCE_SUPPORT_CQ_RECORD_DB = 1 << 0,
};
141
/* hardware QP states, mirroring the IB QP state machine */
enum hns_roce_qp_state {
        HNS_ROCE_QP_STATE_RST,          /* reset */
        HNS_ROCE_QP_STATE_INIT,         /* initialized */
        HNS_ROCE_QP_STATE_RTR,          /* ready to receive */
        HNS_ROCE_QP_STATE_RTS,          /* ready to send */
        HNS_ROCE_QP_STATE_SQD,          /* send queue drained */
        HNS_ROCE_QP_STATE_ERR,          /* error */
        HNS_ROCE_QP_NUM_STATE,          /* number of states (sentinel) */
};
151
/* asynchronous event types reported through the AEQ */
enum hns_roce_event {
        HNS_ROCE_EVENT_TYPE_PATH_MIG                  = 0x01,
        HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED           = 0x02,
        HNS_ROCE_EVENT_TYPE_COMM_EST                  = 0x03,
        HNS_ROCE_EVENT_TYPE_SQ_DRAINED                = 0x04,
        HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR            = 0x05,
        HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR    = 0x06,
        HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR     = 0x07,
        HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH           = 0x08,
        HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH        = 0x09,
        HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR           = 0x0a,
        HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR           = 0x0b,
        HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW               = 0x0c,
        HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID             = 0x0d,
        HNS_ROCE_EVENT_TYPE_PORT_CHANGE               = 0x0f,
        /* 0x10 and 0x11 are currently unused event codes */
        HNS_ROCE_EVENT_TYPE_DB_OVERFLOW               = 0x12,
        HNS_ROCE_EVENT_TYPE_MB                        = 0x13,
        HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW              = 0x14,
        HNS_ROCE_EVENT_TYPE_FLR                       = 0x15,
};
173
/* Local Work Queue Catastrophic Error, subtypes of event 0x05 */
enum {
        HNS_ROCE_LWQCE_QPC_ERROR                = 1,
        HNS_ROCE_LWQCE_MTU_ERROR                = 2,
        HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR        = 3,
        HNS_ROCE_LWQCE_WQE_ADDR_ERROR           = 4,
        HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR       = 5,
        HNS_ROCE_LWQCE_SL_ERROR                 = 6,
        HNS_ROCE_LWQCE_PORT_ERROR               = 7,
};

/* Local Access Violation Work Queue Error, subtypes of event 0x07 */
enum {
        HNS_ROCE_LAVWQE_R_KEY_VIOLATION         = 1,
        HNS_ROCE_LAVWQE_LENGTH_ERROR            = 2,
        HNS_ROCE_LAVWQE_VA_ERROR                = 3,
        HNS_ROCE_LAVWQE_PD_ERROR                = 4,
        HNS_ROCE_LAVWQE_RW_ACC_ERROR            = 5,
        HNS_ROCE_LAVWQE_KEY_STATE_ERROR         = 6,
        HNS_ROCE_LAVWQE_MR_OPERATION_ERROR      = 7,
};

/* doorbell overflow subtypes of event HNS_ROCE_EVENT_TYPE_DB_OVERFLOW */
enum {
        HNS_ROCE_DB_SUBTYPE_SDB_OVF             = 1,
        HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF         = 2,
        HNS_ROCE_DB_SUBTYPE_ODB_OVF             = 3,
        HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF         = 4,
        HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP         = 5,
        HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP         = 6,
};

enum {
        /* RQ&SRQ related operations */
        HNS_ROCE_OPCODE_SEND_DATA_RECEIVE       = 0x06,
        HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE   = 0x07,
};
211
/* device capability flag bits (note: BIT(6) is intentionally unassigned here) */
enum {
        HNS_ROCE_CAP_FLAG_REREG_MR              = BIT(0),
        HNS_ROCE_CAP_FLAG_ROCE_V1_V2            = BIT(1),
        HNS_ROCE_CAP_FLAG_RQ_INLINE             = BIT(2),
        HNS_ROCE_CAP_FLAG_RECORD_DB             = BIT(3),
        HNS_ROCE_CAP_FLAG_SQ_RECORD_DB          = BIT(4),
        HNS_ROCE_CAP_FLAG_SRQ                   = BIT(5),
        HNS_ROCE_CAP_FLAG_MW                    = BIT(7),
        HNS_ROCE_CAP_FLAG_FRMR                  = BIT(8),
        HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL          = BIT(9),
        HNS_ROCE_CAP_FLAG_ATOMIC                = BIT(10),
};

/* which kind of queue a memory-translate table describes */
enum hns_roce_mtt_type {
        MTT_TYPE_WQE,           /* work queue entries */
        MTT_TYPE_CQE,           /* completion queue entries */
        MTT_TYPE_SRQWQE,        /* shared receive queue WQEs */
        MTT_TYPE_IDX            /* SRQ index queue */
};
231
232 #define HNS_ROCE_DB_TYPE_COUNT                  2
233 #define HNS_ROCE_DB_UNIT_SIZE                   4
234
235 enum {
236         HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
237 };
238
/* progress of a hardware function-level reset as seen by the driver */
enum hns_roce_reset_stage {
        HNS_ROCE_STATE_NON_RST,
        HNS_ROCE_STATE_RST_BEF_DOWN,
        HNS_ROCE_STATE_RST_DOWN,
        HNS_ROCE_STATE_RST_UNINIT,
        HNS_ROCE_STATE_RST_INIT,
        HNS_ROCE_STATE_RST_INITED,
};

/* driver instance lifecycle states */
enum hns_roce_instance_state {
        HNS_ROCE_STATE_NON_INIT,
        HNS_ROCE_STATE_INIT,
        HNS_ROCE_STATE_INITED,
        HNS_ROCE_STATE_UNINIT,
};

enum {
        HNS_ROCE_RST_DIRECT_RETURN              = 0,
};

/* result of processing a command while a reset may be in flight */
enum {
        CMD_RST_PRC_OTHERS,
        CMD_RST_PRC_SUCCESS,
        CMD_RST_PRC_EBUSY,
};
264
#define HNS_ROCE_CMD_SUCCESS                    1

/* physical port link states */
#define HNS_ROCE_PORT_DOWN                      0
#define HNS_ROCE_PORT_UP                        1

/* number of MTT entries grouped in one segment */
#define HNS_ROCE_MTT_ENTRY_PER_SEG              8

/* shift for a 4 KB page */
#define PAGE_ADDR_SHIFT                         12
273
/* user access region: a doorbell page assigned to a context */
struct hns_roce_uar {
        u64             pfn;            /* page frame number of the UAR */
        unsigned long   index;
        unsigned long   logic_idx;
};

/* per-process (user context) driver state */
struct hns_roce_ucontext {
        struct ib_ucontext      ibucontext;
        struct hns_roce_uar     uar;
        struct list_head        page_list;      /* user doorbell pages */
        struct mutex            page_mutex;     /* protects page_list */
};

/* protection domain */
struct hns_roce_pd {
        struct ib_pd            ibpd;
        unsigned long           pdn;            /* PD number */
};

/* simple index allocator backed by a bitmap */
struct hns_roce_bitmap {
        /* Bitmap Traversal last a bit which is 1 */
        unsigned long           last;
        unsigned long           top;
        unsigned long           max;
        unsigned long           reserved_top;
        unsigned long           mask;
        spinlock_t              lock;           /* protects the allocator state */
        unsigned long           *table;
};
302
/* Order bitmap length -- bit num compute formula: 1 << (max_order - order) */
/* Order = 0: bitmap is biggest, order = max bitmap is least (only a bit) */
/* Every bit represents a partner's free/used status in the bitmap */
/*
 * Initially, bits of every bitmap are all 0 except that one bit of max_order is 1
 * Bit = 1 represents idle and available; bit = 0: not available
 */
struct hns_roce_buddy {
        /* Members point to every order level bitmap */
        unsigned long **bits;
        /* Number of available bits at each order level */
        u32            *num_free;
        int             max_order;
        spinlock_t      lock;   /* protects the buddy state */
};
318
/* For Hardware Entry Memory */
struct hns_roce_hem_table {
        /* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
        u32             type;
        /* HEM array element num */
        unsigned long   num_hem;
        /* HEM entry record obj total num */
        unsigned long   num_obj;
        /* Single obj size */
        unsigned long   obj_size;
        unsigned long   table_chunk_size;
        int             lowmem;
        struct mutex    mutex;          /* protects the hem array */
        struct hns_roce_hem **hem;
        u64             **bt_l1;        /* level-1 base address tables */
        dma_addr_t      *bt_l1_dma_addr;
        u64             **bt_l0;        /* level-0 base address tables */
        dma_addr_t      *bt_l0_dma_addr;
};

/* memory translate table: maps a queue buffer to device pages */
struct hns_roce_mtt {
        unsigned long           first_seg;
        int                     order;
        int                     page_shift;
        enum hns_roce_mtt_type  mtt_type;
};
345
/* a contiguous run of buffer pages sharing one addressing hop count */
struct hns_roce_buf_region {
        int offset; /* page offset */
        u32 count; /* page count */
        int hopnum; /* addressing hop num */
};

#define HNS_ROCE_MAX_BT_REGION  3
#define HNS_ROCE_MAX_BT_LEVEL   3
/* multi-level base-address-table memory for one queue object */
struct hns_roce_hem_list {
        struct list_head root_bt;
        /* link all bt dma mem by hop config */
        struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
        struct list_head btm_bt; /* link all bottom bt in @mid_bt */
        dma_addr_t root_ba; /* pointer to the root ba table */
        int bt_pg_shift;
};

/* memory translate region */
struct hns_roce_mtr {
        struct hns_roce_hem_list hem_list;
        int buf_pg_shift;
};
368
/* memory window */
struct hns_roce_mw {
        struct ib_mw            ibmw;
        u32                     pdn;            /* PD number the MW belongs to */
        u32                     rkey;
        int                     enabled; /* MW's active status */
        u32                     pbl_hop_num;
        u32                     pbl_ba_pg_sz;
        u32                     pbl_buf_pg_sz;
};

/* Only support 4K page size for mr register */
#define MR_SIZE_4K 0

/* memory region, including its page-buffer-list (PBL) translation tables */
struct hns_roce_mr {
        struct ib_mr            ibmr;
        struct ib_umem          *umem;
        u64                     iova; /* MR's virtual original addr */
        u64                     size; /* Address range of MR */
        u32                     key; /* Key of MR */
        u32                     pd;   /* PD num of MR */
        u32                     access; /* Access permission of MR */
        u32                     npages;
        int                     enabled; /* MR's active status */
        int                     type;   /* MR's register type (MR_TYPE_*) */
        u64                     *pbl_buf;       /* MR's PBL space */
        dma_addr_t              pbl_dma_addr;   /* MR's PBL space PA */
        u32                     pbl_size;       /* PA number in the PBL */
        u64                     pbl_ba;         /* page table address */
        u32                     l0_chunk_last_num;      /* L0 last number */
        u32                     l1_chunk_last_num;      /* L1 last number */
        u64                     **pbl_bt_l2;    /* PBL BT L2 */
        u64                     **pbl_bt_l1;    /* PBL BT L1 */
        u64                     *pbl_bt_l0;     /* PBL BT L0 */
        dma_addr_t              *pbl_l2_dma_addr;       /* PBL BT L2 dma addr */
        dma_addr_t              *pbl_l1_dma_addr;       /* PBL BT L1 dma addr */
        dma_addr_t              pbl_l0_dma_addr;        /* PBL BT L0 dma addr */
        u32                     pbl_ba_pg_sz;   /* BT chunk page size */
        u32                     pbl_buf_pg_sz;  /* buf chunk page size */
        u32                     pbl_hop_num;    /* multi-hop number */
};
409
/* allocators and HEM tables for MR/MTT resources */
struct hns_roce_mr_table {
        struct hns_roce_bitmap          mtpt_bitmap;
        struct hns_roce_buddy           mtt_buddy;
        struct hns_roce_hem_table       mtt_table;
        struct hns_roce_hem_table       mtpt_table;
        struct hns_roce_buddy           mtt_cqe_buddy;
        struct hns_roce_hem_table       mtt_cqe_table;
        struct hns_roce_buddy           mtt_srqwqe_buddy;
        struct hns_roce_hem_table       mtt_srqwqe_table;
        struct hns_roce_buddy           mtt_idx_buddy;
        struct hns_roce_hem_table       mtt_idx_table;
};
422
/* one work queue (SQ or RQ) of a QP */
struct hns_roce_wq {
        u64             *wrid;     /* Work request ID */
        spinlock_t      lock;      /* protects head/tail */
        u32             wqe_cnt;  /* WQE num */
        int             max_gs;   /* max scatter/gather entries per WQE */
        int             offset;   /* byte offset inside the QP buffer */
        int             wqe_shift;      /* WQE size (log2) */
        u32             head;
        u32             tail;
        void __iomem    *db_reg_l;      /* doorbell register */
};

/* extended SGE area of a work queue */
struct hns_roce_sge {
        int             sge_cnt;        /* SGE num */
        int             offset;
        int             sge_shift;      /* SGE size (log2) */
};

/* a single DMA-coherent buffer chunk */
struct hns_roce_buf_list {
        void            *buf;           /* kernel virtual address */
        dma_addr_t      map;            /* DMA address */
};

/* a queue buffer, either one direct chunk or a list of pages */
struct hns_roce_buf {
        struct hns_roce_buf_list        direct;
        struct hns_roce_buf_list        *page_list;
        int                             nbufs;
        u32                             npages;
        u32                             size;
        int                             page_shift;
};
454
/* kernel doorbell page directory; order0/order1 track free record slots */
struct hns_roce_db_pgdir {
        struct list_head        list;
        DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
        DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
        unsigned long           *bits[HNS_ROCE_DB_TYPE_COUNT];
        u32                     *page;
        dma_addr_t              db_dma;
};

/* a pinned user page holding userspace doorbell records */
struct hns_roce_user_db_page {
        struct list_head        list;
        struct ib_umem          *umem;
        unsigned long           user_virt;      /* user virtual address of the page */
        refcount_t              refcount;
};

/* one doorbell record, backed by either a kernel pgdir or a user page */
struct hns_roce_db {
        u32             *db_record;
        union {
                struct hns_roce_db_pgdir *pgdir;
                struct hns_roce_user_db_page *user_page;
        } u;
        dma_addr_t      dma;
        void            *virt_addr;
        int             index;
        int             order;
};
482
/* completion queue */
struct hns_roce_cq {
        struct ib_cq                    ib_cq;
        struct hns_roce_buf             buf;
        struct hns_roce_mtt             mtt;
        struct hns_roce_db              db;
        u8                              db_en;          /* record doorbell enabled */
        spinlock_t                      lock;           /* protects CQ polling state */
        struct ib_umem                  *umem;          /* user-space CQ buffer, if any */
        u32                             cq_depth;
        u32                             cons_index;     /* consumer index */
        u32                             *set_ci_db;     /* record doorbell buffer */
        void __iomem                    *cq_db_l;       /* doorbell register */
        u16                             *tptr_addr;
        int                             arm_sn;         /* arm sequence number */
        unsigned long                   cqn;            /* CQ number */
        u32                             vector;         /* completion vector */
        atomic_t                        refcount;
        struct completion               free;           /* signalled when refcount drops */
        struct list_head                sq_list; /* all qps on this send cq */
        struct list_head                rq_list; /* all qps on this recv cq */
        int                             is_armed; /* cq is armed */
        struct list_head                node; /* all armed cqs are on a list */
};
506
/* SRQ index queue: tracks which SRQ WQEs are posted */
struct hns_roce_idx_que {
        struct hns_roce_buf             idx_buf;
        int                             entry_sz;
        u32                             buf_size;
        struct ib_umem                  *umem;
        struct hns_roce_mtt             mtt;
        unsigned long                   *bitmap;        /* free-entry bitmap */
};

/* shared receive queue */
struct hns_roce_srq {
        struct ib_srq           ibsrq;
        unsigned long           srqn;           /* SRQ number */
        u32                     wqe_cnt;
        int                     max_gs;
        int                     wqe_shift;      /* WQE size (log2) */
        void __iomem            *db_reg_l;      /* doorbell register */

        atomic_t                refcount;
        struct completion       free;           /* signalled when refcount drops */

        struct hns_roce_buf     buf;
        u64                    *wrid;
        struct ib_umem         *umem;
        struct hns_roce_mtt     mtt;
        struct hns_roce_idx_que idx_que;
        spinlock_t              lock;           /* protects head/tail */
        int                     head;
        int                     tail;
        struct mutex            mutex;
        void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};
538
/* UAR index allocator */
struct hns_roce_uar_table {
        struct hns_roce_bitmap bitmap;
};

/* QP number allocator plus the per-QP context HEM tables */
struct hns_roce_qp_table {
        struct hns_roce_bitmap          bitmap;
        struct hns_roce_hem_table       qp_table;
        struct hns_roce_hem_table       irrl_table;
        struct hns_roce_hem_table       trrl_table;
        struct hns_roce_hem_table       sccc_table;
        struct mutex                    scc_mutex;      /* protects SCC context access */
};

/* CQ number allocator, cqn lookup, and CQ context table */
struct hns_roce_cq_table {
        struct hns_roce_bitmap          bitmap;
        struct xarray                   array;          /* cqn -> CQ lookup */
        struct hns_roce_hem_table       table;
};

/* SRQ number allocator, srqn lookup, and SRQ context table */
struct hns_roce_srq_table {
        struct hns_roce_bitmap          bitmap;
        struct xarray                   xa;             /* srqn -> SRQ lookup */
        struct hns_roce_hem_table       table;
};

struct hns_roce_raq_table {
        struct hns_roce_buf_list        *e_raq_buf;
};
567
/* address vector: per-destination routing information */
struct hns_roce_av {
        u8          port;
        u8          gid_index;
        u8          stat_rate;
        u8          hop_limit;
        u32         flowlabel;
        u8          sl;                         /* service level */
        u8          tclass;
        u8          dgid[HNS_ROCE_GID_SIZE];    /* destination GID */
        u8          mac[ETH_ALEN];              /* destination MAC */
        u16         vlan_id;
        bool        vlan_en;
};

/* address handle */
struct hns_roce_ah {
        struct ib_ah            ibah;
        struct hns_roce_av      av;
};

/* state of one in-flight mailbox command in event mode */
struct hns_roce_cmd_context {
        struct completion       done;           /* signalled on command completion */
        int                     result;
        int                     next;           /* next free context index */
        u64                     out_param;
        u16                     token;
};
594
/* command queue for mailbox commands to firmware/hardware */
struct hns_roce_cmdq {
        struct dma_pool         *pool;          /* mailbox buffer pool */
        struct mutex            hcr_mutex;      /* serializes HCR register access */
        struct semaphore        poll_sem;       /* poll mode: one command at a time */
        /*
         * Event mode: cmd register mutex protection,
         * ensures we do not exceed max_cmds and users stay in their limit region
         */
        struct semaphore        event_sem;
        int                     max_cmds;
        spinlock_t              context_lock;   /* protects context free list */
        int                     free_head;
        struct hns_roce_cmd_context *context;
        /*
         * Mask derived from max_cmds, which is
         * computed as a power of 2
         */
        u16                     token_mask;
        /*
         * Whether the command interface uses event mode; defaults to poll.
         * After the cmd event queue is ready, we can switch into event mode;
         * on device close, switch back into poll mode (non event mode)
         */
        u8                      use_events;
};

/* a DMA-able mailbox buffer */
struct hns_roce_cmd_mailbox {
        void                   *buf;            /* kernel virtual address */
        dma_addr_t              dma;            /* DMA address */
};
626
struct hns_roce_dev;

/* one scatter entry for RQ inline data */
struct hns_roce_rinl_sge {
        void                    *addr;
        u32                     len;
};

/* per-WQE scatter list for RQ inline data */
struct hns_roce_rinl_wqe {
        struct hns_roce_rinl_sge *sg_list;
        u32                      sge_cnt;
};

/* per-QP buffer of RQ inline WQE descriptors */
struct hns_roce_rinl_buf {
        struct hns_roce_rinl_wqe *wqe_list;
        u32                      wqe_cnt;
};
643
/* queue pair */
struct hns_roce_qp {
        struct ib_qp            ibqp;
        struct hns_roce_buf     hr_buf;
        struct hns_roce_wq      rq;
        struct hns_roce_db      rdb;            /* RQ record doorbell */
        struct hns_roce_db      sdb;            /* SQ record doorbell */
        u8                      rdb_en;
        u8                      sdb_en;
        u32                     doorbell_qpn;
        u32                     sq_signal_bits;
        struct hns_roce_wq      sq;

        struct ib_umem          *umem;          /* user-space QP buffer, if any */
        struct hns_roce_mtt     mtt;
        struct hns_roce_mtr     mtr;

        /* this define must be less than HNS_ROCE_MAX_BT_REGION */
#define HNS_ROCE_WQE_REGION_MAX  3
        struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX];
        int                     region_cnt;
        int                     wqe_bt_pg_shift;

        u32                     buff_size;
        struct mutex            mutex;
        u8                      port;
        u8                      phy_port;
        u8                      sl;             /* service level */
        u8                      resp_depth;
        u8                      state;
        u32                     access_flags;
        u32                     atomic_rd_en;
        u32                     pkey_index;
        u32                     qkey;
        void                    (*event)(struct hns_roce_qp *qp,
                                         enum hns_roce_event event_type);
        unsigned long           qpn;            /* QP number */

        atomic_t                refcount;
        struct completion       free;           /* signalled when refcount drops */

        struct hns_roce_sge     sge;            /* extended SGE area */
        u32                     next_sge;

        struct hns_roce_rinl_buf rq_inl_buf;
        struct list_head        node;           /* all qps are on a list */
        struct list_head        rq_node;        /* all recv qps are on a list */
        struct list_head        sq_node;        /* all send qps are on a list */
};
692
/* RoCE-over-Ethernet state: backing netdevs and port mapping */
struct hns_roce_ib_iboe {
        spinlock_t              lock;           /* protects netdevs */
        struct net_device      *netdevs[HNS_ROCE_MAX_PORTS];
        struct notifier_block   nb;             /* netdev event notifier */
        u8                      phy_port[HNS_ROCE_MAX_PORTS];
};

/* EQ context status values */
enum {
        HNS_ROCE_EQ_STAT_INVALID  = 0,
        HNS_ROCE_EQ_STAT_VALID    = 2,
};
704
/* completion event queue entry */
struct hns_roce_ceqe {
        __le32                  comp;
};

/* asynchronous event queue entry; payload depends on the event type */
struct hns_roce_aeqe {
        __le32 asyn;
        union {
                struct {
                        __le32 qp;
                        u32 rsv0;
                        u32 rsv1;
                } qp_event;

                struct {
                        __le32 srq;
                        u32 rsv0;
                        u32 rsv1;
                } srq_event;

                struct {
                        __le32 cq;
                        u32 rsv0;
                        u32 rsv1;
                } cq_event;

                struct {
                        __le32 ceqe;
                        u32 rsv0;
                        u32 rsv1;
                } ce_event;

                struct {
                        __le64  out_param;
                        __le16  token;
                        u8      status;
                        u8      rsv0;
                } __packed cmd;
        } event;
};
744
/* event queue (completion or asynchronous) */
struct hns_roce_eq {
        struct hns_roce_dev             *hr_dev;
        void __iomem                    *doorbell;

        int                             type_flag; /* Aeq:1 ceq:0 */
        int                             eqn;            /* EQ number */
        u32                             entries;
        int                             log_entries;
        int                             eqe_size;
        int                             irq;
        int                             log_page_size;
        int                             cons_index;     /* consumer index */
        struct hns_roce_buf_list        *buf_list;
        int                             over_ignore;
        int                             coalesce;
        int                             arm_st;
        u64                             eqe_ba;         /* EQE base address */
        int                             eqe_ba_pg_sz;
        int                             eqe_buf_pg_sz;
        int                             hop_num;
        u64                             *bt_l0; /* Base address table for L0 */
        u64                             **bt_l1; /* Base address table for L1 */
        u64                             **buf;
        dma_addr_t                      l0_dma;
        dma_addr_t                      *l1_dma;
        dma_addr_t                      *buf_dma;
        u32                             l0_last_num; /* L0 last chunk num */
        u32                             l1_last_num; /* L1 last chunk num */
        int                             eq_max_cnt;     /* interrupt coalescing count */
        int                             eq_period;      /* interrupt coalescing period */
        int                             shift;
        dma_addr_t                      cur_eqe_ba;
        dma_addr_t                      nxt_eqe_ba;
        int                             event_type;
        int                             sub_type;
};

/* all EQs of a device */
struct hns_roce_eq_table {
        struct hns_roce_eq      *eq;
        void __iomem            **eqc_base; /* only for hw v1 */
};
786
/*
 * Device capability limits and table geometry. Filled in during HW
 * profiling and read throughout the driver; values come from firmware
 * or per-version defaults (see hw v1/v2 code — not visible here).
 */
struct hns_roce_caps {
	u64		fw_ver;
	u8		num_ports;
	int		gid_table_len[HNS_ROCE_MAX_PORTS];
	int		pkey_table_len[HNS_ROCE_MAX_PORTS];
	int		local_ca_ack_delay;
	int		num_uars;
	u32		phy_num_uars;
	/* per-WQE scatter/gather and inline limits */
	u32		max_sq_sg;
	u32		max_sq_inline;
	u32		max_rq_sg;
	u32		max_extend_sg;
	/* QP counts (some entries reserved for internal use) */
	int		num_qps;
	int		reserved_qps;
	int		num_qpc_timer;
	int		num_cqc_timer;
	int		num_srqs;
	/* WQE depth and descriptor-size limits */
	u32		max_wqes;
	u32		max_srq_wrs;
	u32		max_srq_sges;
	u32		max_sq_desc_sz;
	u32		max_rq_desc_sz;
	u32		max_srq_desc_sz;
	/* RDMA read/atomic depth per QP */
	int		max_qp_init_rdma;
	int		max_qp_dest_rdma;
	/* CQ counts and depth bounds */
	int		num_cqs;
	u32		max_cqes;
	u32		min_cqes;
	u32		min_wqes;
	int		reserved_cqs;
	int		reserved_srqs;
	/* interrupt vector split: async, completion, misc */
	int		num_aeq_vectors;
	int		num_comp_vectors;
	int		num_other_vectors;
	/* MR/MTT/segment counts */
	int		num_mtpts;
	u32		num_mtt_segs;
	u32		num_cqe_segs;
	u32		num_srqwqe_segs;
	u32		num_idx_segs;
	int		reserved_mrws;
	int		reserved_uars;
	int		num_pds;
	int		reserved_pds;
	/* sizes of the various hardware context entries */
	u32		mtt_entry_sz;
	u32		cq_entry_sz;
	u32		page_size_cap;
	u32		reserved_lkey;
	int		mtpt_entry_sz;
	int		qpc_entry_sz;
	int		irrl_entry_sz;
	int		trrl_entry_sz;
	int		cqc_entry_sz;
	int		sccc_entry_sz;
	int		qpc_timer_entry_sz;
	int		cqc_timer_entry_sz;
	int		srqc_entry_sz;
	int		idx_entry_sz;
	/* PBL page sizes and multi-hop depth */
	u32		pbl_ba_pg_sz;
	u32		pbl_buf_pg_sz;
	u32		pbl_hop_num;
	int		aeqe_depth;
	int		ceqe_depth;
	enum ib_mtu	max_mtu;
	/* base-address-table (BT) entry counts per context type */
	u32		qpc_bt_num;
	u32		qpc_timer_bt_num;
	u32		srqc_bt_num;
	u32		cqc_bt_num;
	u32		cqc_timer_bt_num;
	u32		mpt_bt_num;
	u32		sccc_bt_num;
	/*
	 * Per-table page-size shifts and hop numbers used by the HEM
	 * multi-hop addressing scheme (ba = base address pages,
	 * buf = data pages).
	 */
	u32		qpc_ba_pg_sz;
	u32		qpc_buf_pg_sz;
	u32		qpc_hop_num;
	u32		srqc_ba_pg_sz;
	u32		srqc_buf_pg_sz;
	u32		srqc_hop_num;
	u32		cqc_ba_pg_sz;
	u32		cqc_buf_pg_sz;
	u32		cqc_hop_num;
	u32		mpt_ba_pg_sz;
	u32		mpt_buf_pg_sz;
	u32		mpt_hop_num;
	u32		mtt_ba_pg_sz;
	u32		mtt_buf_pg_sz;
	u32		mtt_hop_num;
	u32		wqe_sq_hop_num;
	u32		wqe_sge_hop_num;
	u32		wqe_rq_hop_num;
	u32		sccc_ba_pg_sz;
	u32		sccc_buf_pg_sz;
	u32		sccc_hop_num;
	u32		qpc_timer_ba_pg_sz;
	u32		qpc_timer_buf_pg_sz;
	u32		qpc_timer_hop_num;
	u32		cqc_timer_ba_pg_sz;
	u32		cqc_timer_buf_pg_sz;
	u32		cqc_timer_hop_num;
	u32		cqe_ba_pg_sz;
	u32		cqe_buf_pg_sz;
	u32		cqe_hop_num;
	u32		srqwqe_ba_pg_sz;
	u32		srqwqe_buf_pg_sz;
	u32		srqwqe_hop_num;
	u32		idx_ba_pg_sz;
	u32		idx_buf_pg_sz;
	u32		idx_hop_num;
	u32		eqe_ba_pg_sz;
	u32		eqe_buf_pg_sz;
	u32		eqe_hop_num;
	u32		sl_num;
	u32		tsq_buf_pg_sz;
	u32		tpq_buf_pg_sz;
	u32		chunk_sz;	/* chunk size in non multihop mode */
	u64		flags;		/* HNS_ROCE_CAP_FLAG_* feature bits */
	/* default EQ moderation / arm-state settings */
	u16		default_ceq_max_cnt;
	u16		default_ceq_period;
	u16		default_aeq_max_cnt;
	u16		default_aeq_period;
	u16		default_aeq_arm_st;
	u16		default_ceq_arm_st;
};
908
/*
 * Deferred work item carrying an async event (QP/CQ number plus event
 * and sub type) for handling in process context — presumably queued on
 * hr_dev->irq_workq; confirm against the hw event code.
 */
struct hns_roce_work {
	struct hns_roce_dev *hr_dev;
	struct work_struct work;
	u32 qpn;
	u32 cqn;
	int event_type;
	int sub_type;
};
917
/* Diagnostic (DFX) ops: dump the raw CQ context for @cqn into @buffer. */
struct hns_roce_dfx_hw {
	int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
			      int *buffer);
};
922
/* Life-cycle state of the device; stored in hns_roce_dev.state. */
enum hns_roce_device_state {
	HNS_ROCE_DEVICE_STATE_INITED,
	HNS_ROCE_DEVICE_STATE_RST_DOWN,
	HNS_ROCE_DEVICE_STATE_UNINIT,
};
928
/*
 * Hardware-abstraction operation table; each HW generation (v1/v2)
 * provides its own implementation. Optional ops may be NULL for a
 * given generation (e.g. eqc_base above is "only for hw v1").
 */
struct hns_roce_hw {
	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
	/* command queue setup/teardown */
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	/* query capabilities from firmware into hr_dev->caps */
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	/* post a mailbox command; chk_mbox polls for its completion */
	int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
			 u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
			 u16 token, int event);
	int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
	int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
	/* port attribute programming */
	int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
		       const union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
			enum ib_mtu mtu);
	/* fill an MPT (memory protection table) entry in mailbox memory */
	int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
			  unsigned long mtpt_idx);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags, u32 pdn,
				int mr_access_flags, u64 iova, u64 size,
				void *mb_buf);
	int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
	int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
	/* fill a CQ context in mailbox memory */
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle);
	/* map/unmap HEM (hardware entry memory) table chunks */
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj, int step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj,
			 int step_idx);
	/* QP state machine */
	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state);
	int (*destroy_qp)(struct ib_qp *ibqp, struct ib_udata *udata);
	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_qp *hr_qp);
	/* datapath verbs */
	int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr);
	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
			 const struct ib_recv_wr **bad_recv_wr);
	int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			struct ib_udata *udata);
	void (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	/* event queue setup/teardown */
	int (*init_eq)(struct hns_roce_dev *hr_dev);
	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
	/* SRQ support */
	void (*write_srqc)(struct hns_roce_dev *hr_dev,
			   struct hns_roce_srq *srq, u32 pdn, u16 xrcd, u32 cqn,
			   void *mb_buf, u64 *mtts_wqe, u64 *mtts_idx,
			   dma_addr_t dma_handle_wqe,
			   dma_addr_t dma_handle_idx);
	int (*modify_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
	int (*post_srq_recv)(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr);
	/* ib_device_ops tables registered with the RDMA core */
	const struct ib_device_ops *hns_roce_dev_ops;
	const struct ib_device_ops *hns_roce_dev_srq_ops;
};
996
/*
 * Per-device driver state: the embedded ib_device plus all resource
 * tables, capabilities and HW-version operation pointers.
 */
struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct platform_device	*pdev;
	struct pci_dev		*pci_dev;
	struct device		*dev;
	struct hns_roce_uar	priv_uar;	/* kernel-owned UAR */
	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t		sm_lock;
	spinlock_t		bt_cmd_lock;
	bool			active;
	bool			is_reset;
	bool			dis_db;		/* doorbells disabled */
	unsigned long		reset_cnt;
	struct hns_roce_ib_iboe iboe;
	enum hns_roce_device_state state;
	struct list_head	qp_list; /* list of all qps on this dev */
	spinlock_t		qp_list_lock; /* protect qp_list */

	struct list_head	pgdir_list;
	struct mutex		pgdir_mutex;
	int			irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem		*reg_base;	/* mapped device registers */
	struct hns_roce_caps	caps;
	struct xarray		qp_table_xa;	/* qpn -> hns_roce_qp */

	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
	u64			sys_image_guid;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_rev;
	void __iomem		*priv_addr;

	/* mailbox command interface and per-resource tables */
	struct hns_roce_cmdq	cmd;
	struct hns_roce_bitmap    pd_bitmap;
	struct hns_roce_uar_table uar_table;
	struct hns_roce_mr_table  mr_table;
	struct hns_roce_cq_table  cq_table;
	struct hns_roce_srq_table srq_table;
	struct hns_roce_qp_table  qp_table;
	struct hns_roce_eq_table  eq_table;
	struct hns_roce_hem_table  qpc_timer_table;
	struct hns_roce_hem_table  cqc_timer_table;

	int			cmd_mod;	/* event vs. poll mailbox mode */
	int			loop_idc;
	u32			sdb_offset;
	u32			odb_offset;
	dma_addr_t		tptr_dma_addr;	/* only for hw v1 */
	u32			tptr_size;	/* only for hw v1 */
	const struct hns_roce_hw *hw;		/* HW-generation ops */
	void			*priv;		/* HW-generation private data */
	struct workqueue_struct *irq_workq;
	const struct hns_roce_dfx_hw *dfx;	/* diagnostic ops */
};
1051
1052 static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
1053 {
1054         return container_of(ib_dev, struct hns_roce_dev, ib_dev);
1055 }
1056
1057 static inline struct hns_roce_ucontext
1058                         *to_hr_ucontext(struct ib_ucontext *ibucontext)
1059 {
1060         return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
1061 }
1062
1063 static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
1064 {
1065         return container_of(ibpd, struct hns_roce_pd, ibpd);
1066 }
1067
1068 static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
1069 {
1070         return container_of(ibah, struct hns_roce_ah, ibah);
1071 }
1072
1073 static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
1074 {
1075         return container_of(ibmr, struct hns_roce_mr, ibmr);
1076 }
1077
1078 static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
1079 {
1080         return container_of(ibmw, struct hns_roce_mw, ibmw);
1081 }
1082
1083 static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
1084 {
1085         return container_of(ibqp, struct hns_roce_qp, ibqp);
1086 }
1087
1088 static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
1089 {
1090         return container_of(ib_cq, struct hns_roce_cq, ib_cq);
1091 }
1092
1093 static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
1094 {
1095         return container_of(ibsrq, struct hns_roce_srq, ibsrq);
1096 }
1097
1098 static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
1099 {
1100         __raw_writeq(*(u64 *) val, dest);
1101 }
1102
1103 static inline struct hns_roce_qp
1104         *__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
1105 {
1106         return xa_load(&hr_dev->qp_table_xa, qpn & (hr_dev->caps.num_qps - 1));
1107 }
1108
1109 static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
1110 {
1111         u32 page_size = 1 << buf->page_shift;
1112
1113         if (buf->nbufs == 1)
1114                 return (char *)(buf->direct.buf) + offset;
1115         else
1116                 return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
1117                        (offset & (page_size - 1));
1118 }
1119
/* UAR (user access region) allocation and table management */
int hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);

/* mailbox command interface (event- or poll-driven, see cmd_mod) */
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

/* MTT (memory translation table) management */
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt);
void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtt *mtt);
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);

/* MTR (memory translation region) management */
void hns_roce_mtr_init(struct hns_roce_mtr *mtr, int bt_pg_shift,
		       int buf_pg_shift);
int hns_roce_mtr_attach(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			dma_addr_t **bufs, struct hns_roce_buf_region *regions,
			int region_cnt);
void hns_roce_mtr_cleanup(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr);

/* hns roce hw need current block and next block addr from mtt */
#define MTT_MIN_COUNT    2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);

/* per-resource table init/cleanup (called during device init/exit) */
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);
1164
1165 int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
1166 void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
1167                          int rr);
1168 int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
1169                          u32 reserved_bot, u32 resetrved_top);
1170 void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
1171 void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
1172 int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
1173                                 int align, unsigned long *obj);
1174 void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
1175                                 unsigned long obj, int cnt,
1176                                 int rr);
1177
/* AH (address handle) verbs */
int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
		       u32 flags, struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void hns_roce_destroy_ah(struct ib_ah *ah, u32 flags);

/* PD (protection domain) verbs */
int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

/* MR (memory region) verbs */
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			   struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg, struct ib_udata *udata);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cmd_mailbox *mailbox,
			    unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);

/* MW (memory window) verbs; note: second parameter is unnamed */
struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
				struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);

/* kernel buffer allocation (direct or paged, see hns_roce_buf_offset) */
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
		       struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
		       struct hns_roce_buf *buf, u32 page_shift);

int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem);

/* buffer-region helpers for multi-hop MTR attachment */
void hns_roce_init_buf_region(struct hns_roce_buf_region *region, int hopnum,
			      int offset, int buf_cnt);
int hns_roce_alloc_buf_list(struct hns_roce_buf_region *regions,
			    dma_addr_t **bufs, int count);
void hns_roce_free_buf_list(dma_addr_t **bufs, int count);

/* gather DMA addresses of kernel/user buffer pages into @bufs */
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, int start, struct hns_roce_buf *buf);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, int start, struct ib_umem *umem,
			   int page_shift);

/* SRQ (shared receive queue) verbs */
int hns_roce_create_srq(struct ib_srq *srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata);
int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
			enum ib_srq_attr_mask srq_attr_mask,
			struct ib_udata *udata);
void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);

/* QP verbs and WQE accessors */
struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
/* lock/unlock both CQs of a QP (ordering handled internally) */
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

/* CQ verbs */
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata);

void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);

/* user/kernel doorbell record management */
int hns_roce_db_map_user(struct hns_roce_ucontext *context,
			 struct ib_udata *udata, unsigned long virt,
			 struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);

/* event dispatch from EQ interrupt handling */
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
/* top-level device bring-up/teardown */
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);

/* restrack (resource tracking) fill for rdma-tool dumps */
int hns_roce_fill_res_entry(struct sk_buff *msg,
			    struct rdma_restrack_entry *res);
1283 #endif /* _HNS_ROCE_DEVICE_H */