/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"
/* cut down ridiculously long IB macro names */
#define OP(x) RC_OP(x)
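/*
 * A note on OP(): RC_OP() (from the driver's verbs header) pastes the RC
 * opcode prefix onto its argument, so OP(SEND_FIRST) expands to
 * IB_OPCODE_RC_SEND_FIRST from <rdma/ib_pack.h>.  Every opcode named in
 * this file reads that way.
 */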
/**
 * hfi1_add_retry_timer - add/start a retry timer
 * @qp - the QP
 *
 * add a retry timer on the QP
 */
static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
			      rdi->busy_jiffies;
	add_timer(&qp->s_timer);
}
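/*
 * Worked example of the expiry formula above: with the IBTA-default
 * Local ACK Timeout of 14, qp->timeout_jiffies corresponds to
 * 4.096 usec * (1 << 14), i.e. roughly 67 msec, after which
 * hfi1_rc_timeout() resends starting at qp->s_last_psn + 1.
 * rdi->busy_jiffies adds slack when many QPs are active.
 */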
/**
 * hfi1_add_rnr_timer - add/start an rnr timer
 * @qp - the QP
 * @to - timeout in usecs
 *
 * add an rnr timer on the QP
 */
void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
	add_timer(&priv->s_rnr_timer);
}
/**
 * hfi1_mod_retry_timer - mod a retry timer
 * @qp - the QP
 *
 * Modify a potentially already running retry
 * timer
 */
static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
		  rdi->busy_jiffies);
}
/**
 * hfi1_stop_retry_timer - stop a retry timer
 * @qp - the QP
 *
 * stop a retry timer and return if the timer
 * stopped or not
 */
static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
{
	int rval = 0;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry */
	if (qp->s_flags & RVT_S_TIMER) {
		qp->s_flags &= ~RVT_S_TIMER;
		rval = del_timer(&qp->s_timer);
	}
	return rval;
}
/**
 * hfi1_stop_rc_timers - stop all timers
 * @qp - the QP
 *
 * stop any pending timers
 */
void hfi1_stop_rc_timers(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		del_timer(&priv->s_rnr_timer);
	}
}
/**
 * hfi1_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop an rnr timer and return if the timer
 * stopped or not
 */
static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
{
	int rval = 0;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		rval = del_timer(&priv->s_rnr_timer);
	}
	return rval;
}
/**
 * hfi1_del_timers_sync - wait for any timeout routines to exit
 * @qp - the QP
 */
void hfi1_del_timers_sync(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	del_timer_sync(&qp->s_timer);
	del_timer_sync(&priv->s_rnr_timer);
}
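/*
 * Unlike the del_timer() calls above, the _sync variants also wait for a
 * concurrently running timeout handler to finish.  Since those handlers
 * (hfi1_rc_timeout(), hfi1_rc_rnr_retry()) take the QP s_lock themselves,
 * hfi1_del_timers_sync() must not be called with s_lock held.
 */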
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	hfi1_skip_sge(ss, len, 0);
	return wqe->length - len;
}
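/*
 * restart_sge() turns a PSN delta back into a byte offset: every packet
 * but the last carries exactly pmtu bytes.  For example, restarting a
 * 16 KB request at its third packet (delta_psn() == 2) with a 4096-byte
 * path MTU skips 8192 bytes and reports 8192 bytes left to send.
 */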
/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct ib_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If an RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			ps->s_txreq->ss = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			ps->s_txreq->ss = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = hfi1_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		ps->s_txreq->ss = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					    (qp->s_nak_state <<
					     HFI1_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = hfi1_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| RVT_S_AHG_VALID);
	return 0;
}
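/*
 * The s_ack_queue ring drained above is the responder's bookkeeping:
 * hfi1_rc_rcv() stores incoming RDMA read and atomic requests at
 * r_head_ack_queue, while s_tail_ack_queue names the entry whose
 * response is currently being generated, so make_rc_ack() keeps
 * producing packets until the two indices meet.
 */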
/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	u32 hwords = 5;
	u32 len;
	u32 bth0 = 0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;

	lockdep_assert_held(&qp->s_lock);
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	ohdr = &ps->s_txreq->phdr.hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;

	/* Sending responses takes priority over sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
				   IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			smp_read_barrier_depends(); /* see post_one_send() */
			if (qp->s_tail == READ_ONCE(qp->s_head)) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately
			 * after all prior requests have completed
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				hfi1_send_complete(qp, wqe,
						   err ? IB_WC_LOC_PROT_ERR
						       : IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				qp->s_hdrwords = 0;
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
						wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		} else {
			qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
			/* invalidate data comes after the BTH */
			ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate an RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate an RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		put_ib_reth_vaddr(
			wqe->rdma_wr.remote_addr + len,
			&ohdr->u.rc.reth);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = ss;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(
		qp,
		ohdr,
		bth0 | (qp->s_state << 24),
		bth2,
		middle,
		ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}
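/*
 * Exit-path summary for hfi1_make_rc_req(): "done_free_tx" returns 1
 * with the txreq already released so the send engine simply calls back
 * for more work; "bail" frees the unused txreq first; "bail_no_tx" also
 * clears RVT_S_BUSY because no packet was queued and the engine must be
 * re-armed by whatever event clears the blocking RVT_S_WAIT_* flag.
 */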
/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and send engine.
 */
void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
		      int is_fecn)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc, pbc_flags = 0;
	u16 lrh0;
	u16 sc5;
	u32 bth0;
	u32 hwords;
	u32 vl, plen;
	struct send_context *sc;
	struct pio_buf *pbuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	unsigned long flags;
	struct hfi1_qp_priv *priv = qp->priv;

	/* clear the defer count */
	priv->r_adefered = 0;

	/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING)
		goto queue_ack;

	/* Ensure s_rdma_ack_cnt changes are committed */
	smp_read_barrier_depends();
	if (qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header */
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
					&qp->remote_ah_attr.grh, hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					   (qp->r_nak_state <<
					    HFI1_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = hfi1_compute_aeth(qp);
	sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
	pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
	lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (driver_lstate(ppd) != IB_PORT_ACTIVE)
		return;

	sc = rcd->sc;
	plen = 2 /* PBC */ + hwords;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);

	pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
	if (!pbuf) {
		/*
		 * We have no room to send at the moment.  Pass
		 * responsibility for sending the ACK to the send engine
		 * so that when enough buffer space becomes available,
		 * the ACK is sent ahead of other outgoing packets.
		 */
		goto queue_ack;
	}

	trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);

	return;

queue_ack:
	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;
	this_cpu_inc(*ibp->rvp.rc_qacks);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send engine. */
	hfi1_schedule_send(qp);
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
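/*
 * Two delivery paths above: the common case writes the bare ACK straight
 * into a PIO send buffer, bypassing the QP's send engine entirely.  Only
 * when a read/atomic response is pending, or no PIO buffer is available,
 * does the ACK fall back to queue_ack and ride the normal send path via
 * make_rc_ack().
 */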
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	lockdep_assert_held(&qp->s_lock);
	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since it's only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send engine can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~RVT_S_AHG_VALID;
}
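/*
 * cmp_psn()/delta_psn()/mask_psn(), used throughout this file, implement
 * 24-bit circular PSN arithmetic: PSNs wrap at 2^24, so ordering is the
 * sign of the sign-extended 24-bit difference, memcmp-style.  A sketch of
 * the comparison helper (the real definition lives in the verbs headers):
 *
 *	static inline int cmp_psn(u32 a, u32 b)
 *	{
 *		return (((int)a) - ((int)b)) << 8 >> 8;
 *	}
 */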
/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}
/*
 * This is called from s_timer for missing responses.
 */
void hfi1_rc_timeout(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		ibp = to_iport(qp->ibqp.device, qp->port_num);
		ibp->rvp.n_rc_timeouts++;
		qp->s_flags &= ~RVT_S_TIMER;
		del_timer(&qp->s_timer);
		trace_hfi1_timeout(qp, qp->s_last_psn + 1);
		restart_rc(qp, qp->s_last_psn + 1, 1);
		hfi1_schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
 * This is called from s_timer for RNR timeouts.
 */
void hfi1_rc_rnr_retry(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	hfi1_stop_rnr_timer(qp);
	hfi1_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
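/*
 * Note the asymmetry between the two timeout handlers above: a transport
 * timeout (no response at all) backs the requester up via restart_rc(),
 * consuming one of the s_retry attempts, while an RNR timeout merely
 * waits out the interval the responder asked for and then reschedules
 * the send engine with the QP state intact.
 */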
/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	lockdep_assert_held(&qp->s_lock);
	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}
/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 opcode;
	u32 psn;

	lockdep_assert_held(&qp->s_lock);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags &
	      (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		hfi1_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_sendcomplete(qp, psn);
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}
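/*
 * Completion-ordering note: the peer can ACK a SWQE while its last
 * packets are still in the local send pipeline, so the loop above holds
 * back rvt_qp_swqe_complete() for any WQE whose lpsn still falls inside
 * the [s_sending_psn, s_sending_hpsn] in-flight window.
 */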
static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}
/*
 * Generate a SWQE completion.
 * This is similar to hfi1_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct hfi1_ibport *ibp)
{
	lockdep_assert_held(&qp->s_lock);
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress not running attempt to progress
		 * SDMA descriptors.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_cur = qp->s_acked;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * May be called at interrupt level, with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;
	unsigned long to;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include the ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = rcd_to_iport(rcd);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is an RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK an RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	switch (aeth >> 29) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * mod the retry timer.
			 */
			hfi1_mod_retry_timer(qp);
			/*
			 * We can stop re-sending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (cmp_psn(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			hfi1_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		hfi1_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		hfi1_stop_rc_timers(qp);
		to =
			ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
					  HFI1_AETH_CREDIT_MASK];
		hfi1_add_rnr_timer(qp, to);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
			HFI1_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				hfi1_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached  */
bail_stop:
	hfi1_stop_rc_timers(qp);
	return ret;
}
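/*
 * The dispatch in do_rc_ack() follows the IBTA AETH layout: the top
 * three bits of the AETH word select ACK (0), RNR NAK (1), or NAK (3),
 * with 2 reserved.  The five bits at the credit position (shifted by
 * HFI1_AETH_CREDIT_SHIFT) are overloaded per type: a credit count for
 * ACKs, an index into ib_hfi1_rnr_table for RNR NAKs, and the error
 * code decoded by the inner switch for NAKs.
 */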
/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry timer */
	hfi1_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
/**
 * rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_ibport *ibp,
			struct ib_other_headers *ohdr,
			void *data, u32 tlen, struct rvt_qp *qp,
			u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
			struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	spin_lock_irqsave(&qp->s_lock, flags);

	trace_hfi1_ack(qp, psn);

	/* Ignore invalid responses. */
	smp_read_barrier_depends(); /* see post_one_send */
	if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> 29) == 0)
				hfi1_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		qp->s_flags |= RVT_S_TIMER;
		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0, 0);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 4)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 4;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		hfi1_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
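/*
 * Note the mid-function unlock in the read-response paths above: the
 * bookkeeping (s_rdma_read_len, last PSN) is updated under s_lock, but
 * the pmtu-sized hfi1_copy_sge() runs after the lock is dropped so a
 * large RDMA read response never holds the QP spinlock across the copy.
 */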
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	rvt_put_qp(qp);
}
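/*
 * rc_defered_ack() and rc_cancel_ack() form a get/put pair: parking the
 * QP on the context's qp_wait_list takes a reference (rvt_get_qp()) that
 * is dropped either here on cancel or, presumably, by the wait-list
 * consumer once the deferred response has been sent.
 */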
/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	trace_hfi1_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = HFI1_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send engine is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}
static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > HFI1_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}
static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	/* keep timestamp in units of 1.024 usec */
	cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;

	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}
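/*
 * Dividing ktime_to_ns() by 1024 yields the 1.024 usec tick the OPA
 * congestion log expects: one tick is 1024 ns, so an event 2.048 ms
 * after boot is stamped 2000, not 2048000.
 */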
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	ccti = cca_timer->ccti;

	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}
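/*
 * This is the receive half of OPA congestion control: a packet arriving
 * with the BECN bit set bumps the per-SL CCTI, which widens the
 * inter-packet gap through set_link_ipg(); the hrtimer armed above then
 * (in its handler elsewhere in the driver) decays CCTI back down in
 * ccti_timer-sized steps once the marking stops.
 */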
/**
 * hfi1_rc_rcv - process an incoming RC packet
 * @rcd: the context pointer
 * @hdr: the header of this packet
 * @rcv_flags: flags relevant to rcv processing
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from qp_rcv() to process an incoming RC packet
 * for the given QP.
 * May be called at interrupt level.
 */
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 bth0, opcode;
	u32 hdrsize = packet->hlen;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret, is_fecn = 0;
	int copy_last = 0;
	u32 rkey;

	lockdep_assert_held(&qp->r_lock);
	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
		return;

	is_fecn = process_ecn(qp, packet, false);

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
			    hdrsize, pmtu, rcd);
		if (is_fecn)
			goto send_ack;
		return;
	}

	/* Compute 24 bits worth of difference. */
	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
	case OP(SEND_ONLY_WITH_INVALIDATE):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
			goto send_last_inv;
		/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST_WITH_INVALIDATE):
send_last_inv:
		rkey = be32_to_cpu(ohdr->u.ieth);
		if (rvt_invalidate_rkey(qp, rkey))
			goto no_immediate_data;
		wc.ex.invalidate_rkey = rkey;
		wc.wc_flags = IB_WC_WITH_INVALIDATE;
		goto send_last;
	case OP(RDMA_WRITE_LAST):
		copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
		/* fall through */
	case OP(SEND_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (bth0 >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (bth0 & IB_BTH_SOLICITED) != 0);
		break;

	case OP(RDMA_WRITE_ONLY):
		copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
		/* fall through */
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += rvt_div_mtu(qp, len - 1);
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     get_ib_ateth_compare(ateth),
				     sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK) {
		struct hfi1_qp_priv *priv = qp->priv;

		if (packet->numpkt == 0) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (priv->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (unlikely(is_fecn)) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		priv->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(rcd, qp, is_fecn);
}
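/*
 * hfi1_rc_rcv() is the single RC demultiplexer: response opcodes
 * (RDMA_READ_RESPONSE_FIRST through ATOMIC_ACKNOWLEDGE) are requester
 * work handed to rc_rcv_resp(); everything else is responder work.  The
 * r_adefered counter at the end batches up to HFI1_PSN_CREDIT worth of
 * acknowledgments, with a FECN forcing an immediate one.
 */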
void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct ib_header *hdr,
	u32 rcv_flags,
	struct rvt_qp *qp)
{
	int has_grh = rcv_flags & HFI1_HAS_GRH;
	struct ib_other_headers *ohdr;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	int diff;
	u32 opcode;
	u32 psn, bth0;

	/* Check for GRH */
	ohdr = &hdr->u.oth;
	if (has_grh)
		ohdr = &hdr->u.l.oth;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence
			 * NAK until all packets
			 * in the receive queue have
			 * been processed.
			 * Otherwise, we end up
			 * propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}