linux.git: drivers/infiniband/hw/cxgb4/cq.c (blob 8e2d490e757a71f8eeb0419dff7e09c8ec75bee5)
1 /*
2  * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include "iw_cxgb4.h"
34
35 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
36                       struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
37                       struct c4iw_wr_wait *wr_waitp)
38 {
39         struct fw_ri_res_wr *res_wr;
40         struct fw_ri_res *res;
41         int wr_len;
42         int ret;
43
44         wr_len = sizeof *res_wr + sizeof *res;
45         set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
46
47         res_wr = __skb_put_zero(skb, wr_len);
48         res_wr->op_nres = cpu_to_be32(
49                         FW_WR_OP_V(FW_RI_RES_WR) |
50                         FW_RI_RES_WR_NRES_V(1) |
51                         FW_WR_COMPL_F);
52         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
53         res_wr->cookie = (uintptr_t)wr_waitp;
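        /*
         * Note: the cookie carries the address of the wait object so the
         * firmware reply path (driven by c4iw_ref_send_wait() below) can
         * find and wake this waiter when the RESET work request completes.
         */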
54         res = res_wr->res;
55         res->u.cq.restype = FW_RI_RES_TYPE_CQ;
56         res->u.cq.op = FW_RI_RES_OP_RESET;
57         res->u.cq.iqid = cpu_to_be32(cq->cqid);
58
59         c4iw_init_wr_wait(wr_waitp);
60         ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
61
62         kfree(cq->sw_queue);
63         dma_free_coherent(&(rdev->lldi.pdev->dev),
64                           cq->memsize, cq->queue,
65                           dma_unmap_addr(cq, mapping));
66         c4iw_put_cqid(rdev, cq->cqid, uctx);
67         return ret;
68 }
69
70 static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
71                      struct c4iw_dev_ucontext *uctx,
72                      struct c4iw_wr_wait *wr_waitp)
73 {
74         struct fw_ri_res_wr *res_wr;
75         struct fw_ri_res *res;
76         int wr_len;
77         int user = (uctx != &rdev->uctx);
78         int ret;
79         struct sk_buff *skb;
80
81         cq->cqid = c4iw_get_cqid(rdev, uctx);
82         if (!cq->cqid) {
83                 ret = -ENOMEM;
84                 goto err1;
85         }
86
87         if (!user) {
88                 cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
89                 if (!cq->sw_queue) {
90                         ret = -ENOMEM;
91                         goto err2;
92                 }
93         }
94         cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
95                                        &cq->dma_addr, GFP_KERNEL);
96         if (!cq->queue) {
97                 ret = -ENOMEM;
98                 goto err3;
99         }
100         dma_unmap_addr_set(cq, mapping, cq->dma_addr);
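        /*
         * Stash the DMA handle in the unmap-address slot so destroy_cq()
         * (and the err4 unwind below) can pass the same value back to
         * dma_free_coherent().
         */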
101         memset(cq->queue, 0, cq->memsize);
102
103         /* build fw_ri_res_wr */
104         wr_len = sizeof *res_wr + sizeof *res;
105
106         skb = alloc_skb(wr_len, GFP_KERNEL);
107         if (!skb) {
108                 ret = -ENOMEM;
109                 goto err4;
110         }
111         set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
112
113         res_wr = __skb_put_zero(skb, wr_len);
114         res_wr->op_nres = cpu_to_be32(
115                         FW_WR_OP_V(FW_RI_RES_WR) |
116                         FW_RI_RES_WR_NRES_V(1) |
117                         FW_WR_COMPL_F);
118         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
119         res_wr->cookie = (uintptr_t)wr_waitp;
120         res = res_wr->res;
121         res->u.cq.restype = FW_RI_RES_TYPE_CQ;
122         res->u.cq.op = FW_RI_RES_OP_WRITE;
123         res->u.cq.iqid = cpu_to_be32(cq->cqid);
124         res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
125                         FW_RI_RES_WR_IQANUS_V(0) |
126                         FW_RI_RES_WR_IQANUD_V(1) |
127                         FW_RI_RES_WR_IQANDST_F |
128                         FW_RI_RES_WR_IQANDSTINDEX_V(
129                                 rdev->lldi.ciq_ids[cq->vector]));
130         res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
131                         FW_RI_RES_WR_IQDROPRSS_F |
132                         FW_RI_RES_WR_IQPCIECH_V(2) |
133                         FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
134                         FW_RI_RES_WR_IQO_F |
135                         FW_RI_RES_WR_IQESIZE_V(1));
136         res->u.cq.iqsize = cpu_to_be16(cq->size);
137         res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
138
139         c4iw_init_wr_wait(wr_waitp);
140         ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
141         if (ret)
142                 goto err4;
143
144         cq->gen = 1;
145         cq->gts = rdev->lldi.gts_reg;
146         cq->rdev = rdev;
147
148         cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
149                                       &cq->bar2_qid,
150                                       user ? &cq->bar2_pa : NULL);
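        /*
         * bar2_va is the kernel mapping used to ring this CQ's GTS/doorbell
         * registers; bar2_pa is only requested for user CQs so that
         * c4iw_create_cq() can export that page to userspace via the
         * gts_key mmap entry.
         */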
151         if (user && !cq->bar2_pa) {
152                 pr_warn("%s: cqid %u not in BAR2 range\n",
153                         pci_name(rdev->lldi.pdev), cq->cqid);
154                 ret = -EINVAL;
155                 goto err4;
156         }
157         return 0;
158 err4:
159         dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
160                           dma_unmap_addr(cq, mapping));
161 err3:
162         kfree(cq->sw_queue);
163 err2:
164         c4iw_put_cqid(rdev, cq->cqid, uctx);
165 err1:
166         return ret;
167 }
168
169 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
170 {
171         struct t4_cqe cqe;
172
173         pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
174                  wq, cq, cq->sw_cidx, cq->sw_pidx);
175         memset(&cqe, 0, sizeof(cqe));
176         cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
177                                  CQE_OPCODE_V(FW_RI_SEND) |
178                                  CQE_TYPE_V(0) |
179                                  CQE_SWCQE_V(1) |
180                                  CQE_QPID_V(wq->sq.qid));
181         cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
182         cq->sw_queue[cq->sw_pidx] = cqe;
183         t4_swcq_produce(cq);
184 }
185
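/*
 * CQEs inserted by insert_recv_cqe() carry status T4_ERR_SWFLUSH and the
 * SWCQE flag, so poll_cq() reaps them from the software queue and the
 * consumer sees IB_WC_WR_FLUSH_ERR completions.
 */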
186 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
187 {
188         int flushed = 0;
189         int in_use = wq->rq.in_use - count;
190
191         BUG_ON(in_use < 0);
192         pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
193                  wq, cq, wq->rq.in_use, count);
194         while (in_use--) {
195                 insert_recv_cqe(wq, cq);
196                 flushed++;
197         }
198         return flushed;
199 }
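
/*
 * Illustrative calling pattern (a sketch of how the QP flush path is
 * expected to drive this helper; qhp and rchp are names from that caller's
 * context, not defined in this file):
 *
 *	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
 *	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 */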
200
201 static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
202                           struct t4_swsqe *swcqe)
203 {
204         struct t4_cqe cqe;
205
206         pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
207                  wq, cq, cq->sw_cidx, cq->sw_pidx);
208         memset(&cqe, 0, sizeof(cqe));
209         cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
210                                  CQE_OPCODE_V(swcqe->opcode) |
211                                  CQE_TYPE_V(1) |
212                                  CQE_SWCQE_V(1) |
213                                  CQE_QPID_V(wq->sq.qid));
214         CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
215         cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
216         cq->sw_queue[cq->sw_pidx] = cqe;
217         t4_swcq_produce(cq);
218 }
219
220 static void advance_oldest_read(struct t4_wq *wq);
221
222 int c4iw_flush_sq(struct c4iw_qp *qhp)
223 {
224         int flushed = 0;
225         struct t4_wq *wq = &qhp->wq;
226         struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
227         struct t4_cq *cq = &chp->cq;
228         int idx;
229         struct t4_swsqe *swsqe;
230
231         if (wq->sq.flush_cidx == -1)
232                 wq->sq.flush_cidx = wq->sq.cidx;
233         idx = wq->sq.flush_cidx;
234         BUG_ON(idx >= wq->sq.size);
235         while (idx != wq->sq.pidx) {
236                 swsqe = &wq->sq.sw_sq[idx];
237                 BUG_ON(swsqe->flushed);
238                 swsqe->flushed = 1;
239                 insert_sq_cqe(wq, cq, swsqe);
240                 if (wq->sq.oldest_read == swsqe) {
241                         BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
242                         advance_oldest_read(wq);
243                 }
244                 flushed++;
245                 if (++idx == wq->sq.size)
246                         idx = 0;
247         }
248         wq->sq.flush_cidx += flushed;
249         if (wq->sq.flush_cidx >= wq->sq.size)
250                 wq->sq.flush_cidx -= wq->sq.size;
251         return flushed;
252 }
253
254 static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
255 {
256         struct t4_swsqe *swsqe;
257         int cidx;
258
259         if (wq->sq.flush_cidx == -1)
260                 wq->sq.flush_cidx = wq->sq.cidx;
261         cidx = wq->sq.flush_cidx;
262         BUG_ON(cidx > wq->sq.size);
263
264         while (cidx != wq->sq.pidx) {
265                 swsqe = &wq->sq.sw_sq[cidx];
266                 if (!swsqe->signaled) {
267                         if (++cidx == wq->sq.size)
268                                 cidx = 0;
269                 } else if (swsqe->complete) {
270
271                         BUG_ON(swsqe->flushed);
272
273                         /*
274                          * Insert this completed cqe into the swcq.
275                          */
276                         pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
277                                  cidx, cq->sw_pidx);
278                         swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
279                         cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
280                         t4_swcq_produce(cq);
281                         swsqe->flushed = 1;
282                         if (++cidx == wq->sq.size)
283                                 cidx = 0;
284                         wq->sq.flush_cidx = cidx;
285                 } else
286                         break;
287         }
288 }
289
290 static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
291                 struct t4_cqe *read_cqe)
292 {
293         read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
294         read_cqe->len = htonl(wq->sq.oldest_read->read_len);
295         read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
296                         CQE_SWCQE_V(SW_CQE(hw_cqe)) |
297                         CQE_OPCODE_V(FW_RI_READ_REQ) |
298                         CQE_TYPE_V(1));
299         read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
300 }
301
302 static void advance_oldest_read(struct t4_wq *wq)
303 {
304
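        /*
         * Pointer difference against the base of sw_sq gives the array
         * index of the current oldest read; start scanning at the entry
         * right after it.
         */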
305         u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
306
307         if (rptr == wq->sq.size)
308                 rptr = 0;
309         while (rptr != wq->sq.pidx) {
310                 wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
311
312                 if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
313                         return;
314                 if (++rptr == wq->sq.size)
315                         rptr = 0;
316         }
317         wq->sq.oldest_read = NULL;
318 }
319
320 /*
321  * Move all CQEs from the HWCQ into the SWCQ.
322  * Deal with out-of-order completions and with completions that
323  * complete prior unsignalled WRs.
324  */
325 void c4iw_flush_hw_cq(struct c4iw_cq *chp)
326 {
327         struct t4_cqe *hw_cqe, *swcqe, read_cqe;
328         struct c4iw_qp *qhp;
329         struct t4_swsqe *swsqe;
330         int ret;
331
332         pr_debug("cqid 0x%x\n", chp->cq.cqid);
333         ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
334
335         /*
336          * This logic is similar to poll_cq(), but not quite the same
337          * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
338          * also do any translation magic that poll_cq() normally does.
339          */
340         while (!ret) {
341                 qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));
342
343                 /*
344                  * drop CQEs with no associated QP
345                  */
346                 if (qhp == NULL)
347                         goto next_cqe;
348
349                 if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
350                         goto next_cqe;
351
352                 if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {
353
354                         /* If we have reached here because of async
355                          * event or other error, and have egress error
356                          * then drop
357                          */
358                         if (CQE_TYPE(hw_cqe) == 1)
359                                 goto next_cqe;
360
361                         /* drop peer2peer RTR reads.
362                          */
363                         if (CQE_WRID_STAG(hw_cqe) == 1)
364                                 goto next_cqe;
365
366                         /*
367                          * Eat completions for unsignaled read WRs.
368                          */
369                         if (!qhp->wq.sq.oldest_read->signaled) {
370                                 advance_oldest_read(&qhp->wq);
371                                 goto next_cqe;
372                         }
373
374                         /*
375                          * Don't write to the HWCQ, create a new read req CQE
376                          * in local memory and move it into the swcq.
377                          */
378                         create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
379                         hw_cqe = &read_cqe;
380                         advance_oldest_read(&qhp->wq);
381                 }
382
383                 /* if it's an SQ completion, then do the magic to move all the
384                  * unsignaled and now in-order completions into the swcq.
385                  */
386                 if (SQ_TYPE(hw_cqe)) {
387                         swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
388                         swsqe->cqe = *hw_cqe;
389                         swsqe->complete = 1;
390                         flush_completed_wrs(&qhp->wq, &chp->cq);
391                 } else {
392                         swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
393                         *swcqe = *hw_cqe;
394                         swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
395                         t4_swcq_produce(&chp->cq);
396                 }
397 next_cqe:
398                 t4_hwcq_consume(&chp->cq);
399                 ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
400         }
401 }
402
403 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
404 {
405         if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
406                 return 0;
407
408         if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
409                 return 0;
410
411         if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
412                 return 0;
413
414         if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
415                 return 0;
416         return 1;
417 }
418
419 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
420 {
421         struct t4_cqe *cqe;
422         u32 ptr;
423
424         *count = 0;
425         pr_debug("count zero %d\n", *count);
426         ptr = cq->sw_cidx;
427         while (ptr != cq->sw_pidx) {
428                 cqe = &cq->sw_queue[ptr];
429                 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
430                     (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
431                         (*count)++;
432                 if (++ptr == cq->size)
433                         ptr = 0;
434         }
435         pr_debug("cq %p count %d\n", cq, *count);
436 }
437
438 /*
439  * poll_cq
440  *
441  * Caller must:
442  *     check the validity of the first CQE,
443  *     supply the wq associated with the qpid.
444  *
445  * credit: cq credit to return to sge.
446  * cqe_flushed: 1 iff the CQE is flushed.
447  * cqe: copy of the polled CQE.
448  *
449  * return value:
450  *    0             CQE returned ok.
451  *    -EAGAIN       CQE skipped, try again.
452  *    -EOVERFLOW    CQ overflow detected.
453  */
454 static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
455                    u8 *cqe_flushed, u64 *cookie, u32 *credit)
456 {
457         int ret = 0;
458         struct t4_cqe *hw_cqe, read_cqe;
459
460         *cqe_flushed = 0;
461         *credit = 0;
462         ret = t4_next_cqe(cq, &hw_cqe);
463         if (ret)
464                 return ret;
465
466         pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
467                  CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
468                  CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
469                  CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
470                  CQE_WRID_LOW(hw_cqe));
471
472         /*
473          * skip cqe's not affiliated with a QP.
474          */
475         if (wq == NULL) {
476                 ret = -EAGAIN;
477                 goto skip_cqe;
478         }
479
480         /*
481          * skip hw cqe's if the wq is flushed.
482          */
483         if (wq->flushed && !SW_CQE(hw_cqe)) {
484                 ret = -EAGAIN;
485                 goto skip_cqe;
486         }
487
488         /*
489          * skip TERMINATE cqes...
490          */
491         if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
492                 ret = -EAGAIN;
493                 goto skip_cqe;
494         }
495
496         /*
497          * Special cqe for drain WR completions...
498          */
499         if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
500                 *cookie = CQE_DRAIN_COOKIE(hw_cqe);
501                 *cqe = *hw_cqe;
502                 goto skip_cqe;
503         }
504
505         /*
506          * Gotta tweak READ completions:
507          *      1) the cqe doesn't contain the sq_wptr from the wr.
508          *      2) opcode not reflected from the wr.
509          *      3) read_len not reflected from the wr.
510          *      4) cq_type is RQ_TYPE not SQ_TYPE.
511          */
512         if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
513
514                 /* If we have reached here because of async
515                  * event or other error, and have egress error
516                  * then drop
517                  */
518                 if (CQE_TYPE(hw_cqe) == 1) {
519                         if (CQE_STATUS(hw_cqe))
520                                 t4_set_wq_in_error(wq);
521                         ret = -EAGAIN;
522                         goto skip_cqe;
523                 }
524
525                 /* If this is an unsolicited read response, then the read
526                  * was generated by the kernel driver as part of peer-2-peer
527                  * connection setup.  So ignore the completion.
528                  */
529                 if (CQE_WRID_STAG(hw_cqe) == 1) {
530                         if (CQE_STATUS(hw_cqe))
531                                 t4_set_wq_in_error(wq);
532                         ret = -EAGAIN;
533                         goto skip_cqe;
534                 }
535
536                 /*
537                  * Eat completions for unsignaled read WRs.
538                  */
539                 if (!wq->sq.oldest_read->signaled) {
540                         advance_oldest_read(wq);
541                         ret = -EAGAIN;
542                         goto skip_cqe;
543                 }
544
545                 /*
546                  * Don't write to the HWCQ, so create a new read req CQE
547                  * in local memory.
548                  */
549                 create_read_req_cqe(wq, hw_cqe, &read_cqe);
550                 hw_cqe = &read_cqe;
551                 advance_oldest_read(wq);
552         }
553
554         if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
555                 *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
556                 t4_set_wq_in_error(wq);
557         }
558
559         /*
560          * RECV completion.
561          */
562         if (RQ_TYPE(hw_cqe)) {
563
564                 /*
565                  * HW only validates 4 bits of MSN.  So we must validate that
566                  * the MSN in the SEND is the next expected MSN.  If it's not,
567                  * then we complete this with T4_ERR_MSN and mark the wq in
568                  * error.
569                  */
570
571                 if (t4_rq_empty(wq)) {
572                         t4_set_wq_in_error(wq);
573                         ret = -EAGAIN;
574                         goto skip_cqe;
575                 }
576                 if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
577                         t4_set_wq_in_error(wq);
578                         hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
579                         goto proc_cqe;
580                 }
581                 goto proc_cqe;
582         }
583
584         /*
585          * If we get here it's a send completion.
586          *
587          * Handle out of order completion. These get stuffed
588          * in the SW SQ. Then the SW SQ is walked to move any
589          * now in-order completions into the SW CQ.  This handles
590          * 2 cases:
591          *      1) reaping unsignaled WRs when the first subsequent
592          *         signaled WR is completed.
593          *      2) out of order read completions.
594          */
595         if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
596                 struct t4_swsqe *swsqe;
597
598                 pr_debug("out of order completion going in sw_sq at idx %u\n",
599                          CQE_WRID_SQ_IDX(hw_cqe));
600                 swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
601                 swsqe->cqe = *hw_cqe;
602                 swsqe->complete = 1;
603                 ret = -EAGAIN;
604                 goto flush_wq;
605         }
606
607 proc_cqe:
608         *cqe = *hw_cqe;
609
610         /*
611          * Reap the associated WR(s) that are freed up with this
612          * completion.
613          */
614         if (SQ_TYPE(hw_cqe)) {
615                 int idx = CQE_WRID_SQ_IDX(hw_cqe);
616                 BUG_ON(idx >= wq->sq.size);
617
618                 /*
619                  * Account for any unsignaled completions completed by
620                  * this signaled completion.  In this case, cidx points
621                  * to the first unsignaled one, and idx points to the
622                  * signaled one.  So adjust in_use based on this delta.
623                  * If this is not completing any unsignaled wrs, then the
624                  * delta will be 0. Handle wrapping also!
625                  */
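                /*
                 * Worked example (illustrative): with sq.size = 16,
                 * cidx = 14 and idx = 2, in_use drops by 16 + 2 - 14 = 4,
                 * reaping the unsignaled entries 14, 15, 0 and 1; the
                 * signaled entry at idx = 2 is then consumed by
                 * t4_sq_consume() below.
                 */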
626                 if (idx < wq->sq.cidx)
627                         wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
628                 else
629                         wq->sq.in_use -= idx - wq->sq.cidx;
630                 BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
631
632                 wq->sq.cidx = (uint16_t)idx;
633                 pr_debug("completing sq idx %u\n", wq->sq.cidx);
634                 *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
635                 if (c4iw_wr_log)
636                         c4iw_log_wr_stats(wq, hw_cqe);
637                 t4_sq_consume(wq);
638         } else {
639                 pr_debug("completing rq idx %u\n", wq->rq.cidx);
640                 *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
641                 BUG_ON(t4_rq_empty(wq));
642                 if (c4iw_wr_log)
643                         c4iw_log_wr_stats(wq, hw_cqe);
644                 t4_rq_consume(wq);
645                 goto skip_cqe;
646         }
647
648 flush_wq:
649         /*
650          * Flush any completed cqes that are now in-order.
651          */
652         flush_completed_wrs(wq, cq);
653
654 skip_cqe:
655         if (SW_CQE(hw_cqe)) {
656                 pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
657                          cq, cq->cqid, cq->sw_cidx);
658                 t4_swcq_consume(cq);
659         } else {
660                 pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
661                          cq, cq->cqid, cq->cidx);
662                 t4_hwcq_consume(cq);
663         }
664         return ret;
665 }
666
667 /*
668  * Get one cq entry from c4iw and map it to openib.
669  *
670  * Returns:
671  *      0                       cqe returned
672  *      -ENODATA                EMPTY;
673  *      -EAGAIN                 caller must try again
674  *      any other -errno        fatal error
675  */
676 static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
677 {
678         struct c4iw_qp *qhp = NULL;
679         struct t4_cqe uninitialized_var(cqe), *rd_cqe;
680         struct t4_wq *wq;
681         u32 credit = 0;
682         u8 cqe_flushed;
683         u64 cookie = 0;
684         int ret;
685
686         ret = t4_next_cqe(&chp->cq, &rd_cqe);
687
688         if (ret)
689                 return ret;
690
691         qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
692         if (!qhp)
693                 wq = NULL;
694         else {
695                 spin_lock(&qhp->lock);
696                 wq = &(qhp->wq);
697         }
698         ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
699         if (ret)
700                 goto out;
701
702         wc->wr_id = cookie;
703         wc->qp = &qhp->ibqp;
704         wc->vendor_err = CQE_STATUS(&cqe);
705         wc->wc_flags = 0;
706
707         pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
708                  CQE_QPID(&cqe),
709                  CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
710                  CQE_STATUS(&cqe), CQE_LEN(&cqe),
711                  CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
712                  (unsigned long long)cookie);
713
714         if (CQE_TYPE(&cqe) == 0) {
715                 if (!CQE_STATUS(&cqe))
716                         wc->byte_len = CQE_LEN(&cqe);
717                 else
718                         wc->byte_len = 0;
719                 wc->opcode = IB_WC_RECV;
720                 if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
721                     CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
722                         wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
723                         wc->wc_flags |= IB_WC_WITH_INVALIDATE;
724                         c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
725                 }
726         } else {
727                 switch (CQE_OPCODE(&cqe)) {
728                 case FW_RI_RDMA_WRITE:
729                         wc->opcode = IB_WC_RDMA_WRITE;
730                         break;
731                 case FW_RI_READ_REQ:
732                         wc->opcode = IB_WC_RDMA_READ;
733                         wc->byte_len = CQE_LEN(&cqe);
734                         break;
735                 case FW_RI_SEND_WITH_INV:
736                 case FW_RI_SEND_WITH_SE_INV:
737                         wc->opcode = IB_WC_SEND;
738                         wc->wc_flags |= IB_WC_WITH_INVALIDATE;
739                         break;
740                 case FW_RI_SEND:
741                 case FW_RI_SEND_WITH_SE:
742                         wc->opcode = IB_WC_SEND;
743                         break;
744
745                 case FW_RI_LOCAL_INV:
746                         wc->opcode = IB_WC_LOCAL_INV;
747                         break;
748                 case FW_RI_FAST_REGISTER:
749                         wc->opcode = IB_WC_REG_MR;
750
751                         /* Invalidate the MR if the fastreg failed */
752                         if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
753                                 c4iw_invalidate_mr(qhp->rhp,
754                                                    CQE_WRID_FR_STAG(&cqe));
755                         break;
756                 case C4IW_DRAIN_OPCODE:
757                         wc->opcode = IB_WC_SEND;
758                         break;
759                 default:
760                         pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
761                                CQE_OPCODE(&cqe), CQE_QPID(&cqe));
762                         ret = -EINVAL;
763                         goto out;
764                 }
765         }
766
767         if (cqe_flushed)
768                 wc->status = IB_WC_WR_FLUSH_ERR;
769         else {
770
771                 switch (CQE_STATUS(&cqe)) {
772                 case T4_ERR_SUCCESS:
773                         wc->status = IB_WC_SUCCESS;
774                         break;
775                 case T4_ERR_STAG:
776                         wc->status = IB_WC_LOC_ACCESS_ERR;
777                         break;
778                 case T4_ERR_PDID:
779                         wc->status = IB_WC_LOC_PROT_ERR;
780                         break;
781                 case T4_ERR_QPID:
782                 case T4_ERR_ACCESS:
783                         wc->status = IB_WC_LOC_ACCESS_ERR;
784                         break;
785                 case T4_ERR_WRAP:
786                         wc->status = IB_WC_GENERAL_ERR;
787                         break;
788                 case T4_ERR_BOUND:
789                         wc->status = IB_WC_LOC_LEN_ERR;
790                         break;
791                 case T4_ERR_INVALIDATE_SHARED_MR:
792                 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
793                         wc->status = IB_WC_MW_BIND_ERR;
794                         break;
795                 case T4_ERR_CRC:
796                 case T4_ERR_MARKER:
797                 case T4_ERR_PDU_LEN_ERR:
798                 case T4_ERR_OUT_OF_RQE:
799                 case T4_ERR_DDP_VERSION:
800                 case T4_ERR_RDMA_VERSION:
801                 case T4_ERR_DDP_QUEUE_NUM:
802                 case T4_ERR_MSN:
803                 case T4_ERR_TBIT:
804                 case T4_ERR_MO:
805                 case T4_ERR_MSN_RANGE:
806                 case T4_ERR_IRD_OVERFLOW:
807                 case T4_ERR_OPCODE:
808                 case T4_ERR_INTERNAL_ERR:
809                         wc->status = IB_WC_FATAL_ERR;
810                         break;
811                 case T4_ERR_SWFLUSH:
812                         wc->status = IB_WC_WR_FLUSH_ERR;
813                         break;
814                 default:
815                         pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
816                                CQE_STATUS(&cqe), CQE_QPID(&cqe));
817                         wc->status = IB_WC_FATAL_ERR;
818                 }
819         }
820 out:
821         if (wq)
822                 spin_unlock(&qhp->lock);
823         return ret;
824 }
825
826 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
827 {
828         struct c4iw_cq *chp;
829         unsigned long flags;
830         int npolled;
831         int err = 0;
832
833         chp = to_c4iw_cq(ibcq);
834
835         spin_lock_irqsave(&chp->lock, flags);
836         for (npolled = 0; npolled < num_entries; ++npolled) {
837                 do {
838                         err = c4iw_poll_cq_one(chp, wc + npolled);
839                 } while (err == -EAGAIN);
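                /*
                 * -EAGAIN from c4iw_poll_cq_one() means the CQE was skipped
                 * (no associated QP, TERMINATE, unsignaled read, ...), so
                 * retry until this wc slot is filled or the CQ is empty.
                 */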
840                 if (err)
841                         break;
842         }
843         spin_unlock_irqrestore(&chp->lock, flags);
844         return !err || err == -ENODATA ? npolled : err;
845 }
846
847 int c4iw_destroy_cq(struct ib_cq *ib_cq)
848 {
849         struct c4iw_cq *chp;
850         struct c4iw_ucontext *ucontext;
851
852         pr_debug("ib_cq %p\n", ib_cq);
853         chp = to_c4iw_cq(ib_cq);
854
855         remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
856         atomic_dec(&chp->refcnt);
857         wait_event(chp->wait, !atomic_read(&chp->refcnt));
858
859         ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
860                                   : NULL;
861         destroy_cq(&chp->rhp->rdev, &chp->cq,
862                    ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
863                    chp->destroy_skb, chp->wr_waitp);
864         c4iw_put_wr_wait(chp->wr_waitp);
865         kfree(chp);
866         return 0;
867 }
868
869 struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
870                              const struct ib_cq_init_attr *attr,
871                              struct ib_ucontext *ib_context,
872                              struct ib_udata *udata)
873 {
874         int entries = attr->cqe;
875         int vector = attr->comp_vector;
876         struct c4iw_dev *rhp;
877         struct c4iw_cq *chp;
878         struct c4iw_create_cq_resp uresp;
879         struct c4iw_ucontext *ucontext = NULL;
880         int ret, wr_len;
881         size_t memsize, hwentries;
882         struct c4iw_mm_entry *mm, *mm2;
883
884         pr_debug("ib_dev %p entries %d\n", ibdev, entries);
885         if (attr->flags)
886                 return ERR_PTR(-EINVAL);
887
888         rhp = to_c4iw_dev(ibdev);
889
890         if (vector >= rhp->rdev.lldi.nciq)
891                 return ERR_PTR(-EINVAL);
892
893         chp = kzalloc(sizeof(*chp), GFP_KERNEL);
894         if (!chp)
895                 return ERR_PTR(-ENOMEM);
896         chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
897         if (!chp->wr_waitp) {
898                 ret = -ENOMEM;
899                 goto err_free_chp;
900         }
901         c4iw_init_wr_wait(chp->wr_waitp);
902
903         wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
904         chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
905         if (!chp->destroy_skb) {
906                 ret = -ENOMEM;
907                 goto err_free_wr_wait;
908         }
909
910         if (ib_context)
911                 ucontext = to_c4iw_ucontext(ib_context);
912
913         /* account for the status page. */
914         entries++;
915
916         /* IQ needs one extra entry to differentiate full vs empty. */
917         entries++;
918
919         /*
920          * entries must be multiple of 16 for HW.
921          */
922         entries = roundup(entries, 16);
923
924         /*
925          * Make the actual HW queue 2x to avoid cidx_inc overflows.
926          */
927         hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);
928
929         /*
930          * Make HW queue at least 64 entries so GTS updates aren't too
931          * frequent.
932          */
933         if (hwentries < 64)
934                 hwentries = 64;
935
936         memsize = hwentries * sizeof *chp->cq.queue;
937
938         /*
939          * memsize must be a multiple of the page size if it's a user cq.
940          */
941         if (ucontext)
942                 memsize = roundup(memsize, PAGE_SIZE);
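        /*
         * Worked example (illustrative): a request for 125 CQEs becomes
         * 125 + 1 (status page) + 1 (full/empty marker) = 127, rounded up
         * to 128 for HW, then doubled to 256 hwentries (subject to the
         * t4_max_iq_size cap and the 64-entry floor), so
         * memsize = 256 * sizeof(*chp->cq.queue), page-aligned for a
         * user CQ.
         */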
943         chp->cq.size = hwentries;
944         chp->cq.memsize = memsize;
945         chp->cq.vector = vector;
946
947         ret = create_cq(&rhp->rdev, &chp->cq,
948                         ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
949                         chp->wr_waitp);
950         if (ret)
951                 goto err_free_skb;
952
953         chp->rhp = rhp;
954         chp->cq.size--;                         /* status page */
955         chp->ibcq.cqe = entries - 2;
956         spin_lock_init(&chp->lock);
957         spin_lock_init(&chp->comp_handler_lock);
958         atomic_set(&chp->refcnt, 1);
959         init_waitqueue_head(&chp->wait);
960         ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
961         if (ret)
962                 goto err_destroy_cq;
963
964         if (ucontext) {
965                 ret = -ENOMEM;
966                 mm = kmalloc(sizeof *mm, GFP_KERNEL);
967                 if (!mm)
968                         goto err_remove_handle;
969                 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
970                 if (!mm2)
971                         goto err_free_mm;
972
973                 uresp.qid_mask = rhp->rdev.cqmask;
974                 uresp.cqid = chp->cq.cqid;
975                 uresp.size = chp->cq.size;
976                 uresp.memsize = chp->cq.memsize;
977                 spin_lock(&ucontext->mmap_lock);
978                 uresp.key = ucontext->key;
979                 ucontext->key += PAGE_SIZE;
980                 uresp.gts_key = ucontext->key;
981                 ucontext->key += PAGE_SIZE;
982                 spin_unlock(&ucontext->mmap_lock);
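                /*
                 * key and gts_key are pseudo mmap offsets handed back to
                 * the user library; the insert_mmap() calls below record
                 * the key -> address pairs that the driver's mmap handler
                 * uses to map the CQ memory and the BAR2/GTS doorbell page.
                 */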
983                 ret = ib_copy_to_udata(udata, &uresp,
984                                        sizeof(uresp) - sizeof(uresp.reserved));
985                 if (ret)
986                         goto err_free_mm2;
987
988                 mm->key = uresp.key;
989                 mm->addr = virt_to_phys(chp->cq.queue);
990                 mm->len = chp->cq.memsize;
991                 insert_mmap(ucontext, mm);
992
993                 mm2->key = uresp.gts_key;
994                 mm2->addr = chp->cq.bar2_pa;
995                 mm2->len = PAGE_SIZE;
996                 insert_mmap(ucontext, mm2);
997         }
998         pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
999                  chp->cq.cqid, chp, chp->cq.size,
1000                  chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
1001         return &chp->ibcq;
1002 err_free_mm2:
1003         kfree(mm2);
1004 err_free_mm:
1005         kfree(mm);
1006 err_remove_handle:
1007         remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
1008 err_destroy_cq:
1009         destroy_cq(&chp->rhp->rdev, &chp->cq,
1010                    ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
1011                    chp->destroy_skb, chp->wr_waitp);
1012 err_free_skb:
1013         kfree_skb(chp->destroy_skb);
1014 err_free_wr_wait:
1015         c4iw_put_wr_wait(chp->wr_waitp);
1016 err_free_chp:
1017         kfree(chp);
1018         return ERR_PTR(ret);
1019 }
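
/*
 * Illustrative sketch (not part of this file; assumes the standard in-kernel
 * verbs entry points of this era): a ULP reaches c4iw_create_cq() through the
 * core, e.g.
 *
 *	struct ib_cq_init_attr attr = { .cqe = 128, .comp_vector = 0 };
 *	struct ib_cq *cq = ib_create_cq(ibdev, comp_fn, event_fn, ctx, &attr);
 *
 * and tears it down with ib_destroy_cq(cq), which lands in c4iw_destroy_cq().
 */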
1020
1021 int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
1022 {
1023         return -ENOSYS;
1024 }
1025
1026 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
1027 {
1028         struct c4iw_cq *chp;
1029         int ret = 0;
1030         unsigned long flag;
1031
1032         chp = to_c4iw_cq(ibcq);
1033         spin_lock_irqsave(&chp->lock, flag);
1034         t4_arm_cq(&chp->cq,
1035                   (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
1036         if (flags & IB_CQ_REPORT_MISSED_EVENTS)
1037                 ret = t4_cq_notempty(&chp->cq);
1038         spin_unlock_irqrestore(&chp->lock, flag);
1039         return ret;
1040 }