/*
 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}
static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
				   struct rm_data_op *op,
				   int wc_status)
{
	if (op->op_nents)
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				DMA_TO_DEVICE);
}
static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rm_rdma_op *op,
				   int wc_status)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	/* If the user asked for a completion notification on this
	 * message, we can implement three different semantics:
	 *  1.	Notify when we received the ACK on the RDS message
	 *	that was queued with the RDMA. This provides reliable
	 *	notification of RDMA status at the expense of a one-way
	 *	packet delay.
	 *  2.	Notify when the IB stack gives us the completion event for
	 *	the RDMA operation.
	 *  3.	Notify when the IB stack gives us the completion event for
	 *	the accompanying RDS messages.
	 * Here, we implement approach #3. To implement approach #2,
	 * we would need to take an event for the rdma WR. To implement #1,
	 * don't call rds_rdma_send_complete at all, and fall back to the notify
	 * handling in the ACK processing code.
	 *
	 * Note: There's no need to explicitly sync any RDMA buffers using
	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
	 * operation itself unmapped the RDMA buffers, which takes care
	 * of synching.
	 */
	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
			     wc_status, rds_rdma_send_complete);

	if (op->op_write)
		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
	else
		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}
static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
				     struct rm_atomic_op *op,
				     int wc_status)
{
	/* unmap atomic recvbuf */
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
			     wc_status, rds_atomic_send_complete);

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
		rds_ib_stats_inc(s_ib_atomic_cswp);
	else
		rds_ib_stats_inc(s_ib_atomic_fadd);
}
/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm for no good reason other than it is unobtainable
 * other than by switching on wr.opcode, currently, and the caller,
 * the event handler, needs it.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
						struct rds_ib_send_work *send,
						int wc_status)
{
	struct rds_message *rm = NULL;

	/* In the error case, wc.opcode sometimes contains garbage */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, data);
			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, rdma);
			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
	case IB_WR_ATOMIC_CMP_AND_SWP:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, atomic);
			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
		}
		break;
	default:
		printk_ratelimited(KERN_NOTICE
				   "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
				   __func__, send->s_wr.opcode);
		break;
	}

	send->s_wr.opcode = 0xdead;

	return rm;
}
void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma[i];

		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_pd->local_dma_lkey;

		send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
	}
}
void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_op && send->s_wr.opcode != 0xdead)
			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}
/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
	    waitqueue_active(&rds_ib_ring_empty_wait))
		wake_up(&rds_ib_ring_empty_wait);
	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}
/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path.  As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_message *rm = NULL;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int nr_sig = 0;

	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
		 (unsigned long long)wc->wr_id, wc->status,
		 ib_wc_status_msg(wc->status), wc->byte_len,
		 be32_to_cpu(wc->ex.imm_data));
	rds_ib_stats_inc(s_ib_tx_cq_event);

	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
		if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);
		rds_ib_ack_send_complete(ic);
		return;
	}

	oldest = rds_ib_ring_oldest(&ic->i_send_ring);

	completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);

	for (i = 0; i < completed; i++) {
		send = &ic->i_sends[oldest];
		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rm = rds_ib_send_unmap_op(ic, send, wc->status);

		if (time_after(jiffies, send->s_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);

		if (send->s_op) {
			if (send->s_op == rm->m_final_op) {
				/* If anyone waited for this message to get
				 * flushed out, wake them up now
				 */
				rds_message_unmapped(rm);
			}
			rds_message_put(rm);
			send->s_op = NULL;
		}

		oldest = (oldest + 1) % ic->i_send_ring.w_nr;
	}

	rds_ib_ring_free(&ic->i_send_ring, completed);
	rds_ib_sub_signaled(ic, nr_sig);

	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	/* We expect errors as the qp is drained during shutdown */
	if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
		rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
				  &conn->c_laddr, &conn->c_faddr,
				  conn->c_tos, wc->status,
				  ib_wc_status_msg(wc->status), wc->vendor_err);
	}
}
/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  -	send credits: this tells us how many WRs we're allowed
 *	to submit without overrunning the receiver's queue. For
 *	each SEND WR we post, we decrement this by one.
 *
 *  -	posted credits: this tells us how many WRs we recently
 *	posted to the receive queue. This value is transferred
 *	to the peer as a "credit update" in a RDS header field.
 *	Every time we transmit credits to the peer, we subtract
 *	the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter.  Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
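/*
 * A minimal sketch of the scheme described above (assuming the usual
 * 16-bit/16-bit split implemented by the IB_GET_SEND_CREDITS,
 * IB_GET_POST_CREDITS and IB_SET_* helpers; see ib.h for the
 * authoritative definitions):
 *
 *	ic->i_credits = [ posted credits : upper 16 | send credits : lower 16 ]
 *
 *	send path (this function):	read i_credits, compute a newval with
 *					both fields decremented, then
 *					atomic_cmpxchg(); retry on conflict.
 *	recv path:			atomic_add(IB_SET_SEND_CREDITS(n)) when
 *					the peer grants credits, and
 *					atomic_add(IB_SET_POST_CREDITS(n)) when
 *					we post new receive buffers.
 */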
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("wanted=%u credits=%u posted=%u\n",
		 wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants
	 * the posted credits regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("credits=%u current=%u%s\n",
		 credits,
		 IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
		 test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
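/*
 * Rough illustration of the trade-off above (hypothetical numbers, not
 * taken from this file): with the threshold of 16 and a receive ring of
 * 1024 entries, a complete refill triggers at most 1024 / 16 = 64
 * credit-carrying ACKs instead of one ACK per posted buffer.
 */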
static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
					     struct rds_ib_send_work *send,
					     bool notify)
{
	/*
	 * We want to delay signaling completions just enough to get
	 * the batching benefits but not so much that we create dead time
	 * on the wire.
	 */
	if (ic->i_unsignaled_wrs-- == 0 || notify) {
		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		send->s_wr.send_flags |= IB_SEND_SIGNALED;
		return 1;
	}
	return 0;
}
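/*
 * Sketch of the resulting signaling cadence (assuming, purely for
 * illustration, rds_ib_sysctl_max_unsig_wrs == 16): the counter is reset
 * to 16 on every signaled WR, so 16 unsignaled WRs go out before the next
 * one carries IB_SEND_SIGNALED, i.e. roughly one completion per 17 posted
 * WRs, unless a caller forces signaling by passing notify == true.
 */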
/*
 * This can be called multiple times for a given message.  The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests.  We translate the scatterlist into a series
 * of work requests that fragment the message.  These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection.  This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
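/*
 * Worked example of the fragmentation described above (assuming the usual
 * RDS_FRAG_SIZE of 4096 bytes; see the header for the real value): a 1 MiB
 * message yields DIV_ROUND_UP(1048576, 4096) = 256 SEND work requests,
 * each carrying sge[0] = the per-slot rds_header and, when payload is
 * present, sge[1] = one fragment of at most RDS_FRAG_SIZE bytes.
 */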
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	const struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;
	int nr_sig = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		scat = &rm->data.op_sg[sg];
		ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
		return sizeof(struct rds_header) + ret;
	}
	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled = 1;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}
	/* map the message the first time we see it */
	if (!ic->i_data_op) {
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		rds_message_addref(rm);
		rm->data.op_dmasg = 0;
		rm->data.op_dmaoff = 0;
		ic->i_data_op = &rm->data;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has a RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
						  RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
							    rds_rdma_cookie_key(rm->m_rdma_cookie),
							    rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		if (ic->i_flowctl) {
			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
			adv_credits += posted;
			BUG_ON(adv_credits > 255);
		}
	}
	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
	i = 0;
	do {
		unsigned int len = 0;

		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma[pos];

		send->s_sge[0].length = sizeof(struct rds_header);
		send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

		memcpy(ic->i_send_hdrs[pos], &rm->m_inc.i_hdr,
		       sizeof(struct rds_header));

		/* Set up the data, if present */
		if (i < work_alloc
		    && scat != &rm->data.op_sg[rm->data.op_count]) {
			len = min(RDS_FRAG_SIZE,
				  sg_dma_len(scat) - rm->data.op_dmaoff);
			send->s_wr.num_sge = 2;

			send->s_sge[1].addr = sg_dma_address(scat);
			send->s_sge[1].addr += rm->data.op_dmaoff;
			send->s_sge[1].length = len;
			send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;

			bytes_sent += len;
			rm->data.op_dmaoff += len;
			if (rm->data.op_dmaoff == sg_dma_len(scat)) {
				scat++;
				rm->data.op_dmasg++;
				rm->data.op_dmaoff = 0;
			}
		}

		rds_ib_set_wr_signal_state(ic, send, false);

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
			rds_ib_set_wr_signal_state(ic, send, true);
			send->s_wr.send_flags |= IB_SEND_SOLICITED;
		}

		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		if (ic->i_flowctl && adv_credits) {
			struct rds_header *hdr = ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
		send = &ic->i_sends[pos];
		i++;

	} while (i < work_alloc
		 && scat != &rm->data.op_sg[rm->data.op_count]);
	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_op = ic->i_data_op;
		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
			nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
		ic->i_data_op = NULL;
	}

	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		if (prev->s_op) {
			ic->i_data_op = prev->s_op;
			prev->s_op = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	BUG_ON(adv_credits);
	return ret;
}
/*
 * Issue atomic operation.
 * A simplified version of the rdma case: we always map one SG, and
 * only 8 bytes, for the return value from the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	const struct ib_send_wr *failed_wr;
	u32 pos;
	u32 work_alloc;
	int ret;
	int nr_sig = 0;

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
		send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
		send->s_atomic_wr.swap = op->op_m_cswp.swap;
		send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
		send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
	} else { /* FADD */
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
		send->s_atomic_wr.compare_add = op->op_m_fadd.add;
		send->s_atomic_wr.swap = 0;
		send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
		send->s_atomic_wr.swap_mask = 0;
	}
	send->s_wr.send_flags = 0;
	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
	send->s_atomic_wr.wr.num_sge = 1;
	send->s_atomic_wr.wr.next = NULL;
	send->s_atomic_wr.remote_addr = op->op_remote_addr;
	send->s_atomic_wr.rkey = op->op_rkey;
	send->s_op = op;
	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = sg_dma_address(op->op_sg);
	send->s_sge[0].length = sg_dma_len(op->op_sg);
	send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &send->s_atomic_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_atomic_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	}

out:
	return ret;
}
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	const struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 max_sge = ic->rds_ibdev->max_sge;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;
	int nr_sig = 0;

	/* map the op the first time we see it */
	if (!op->op_mapped) {
		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
					     op->op_sg, op->op_nents, (op->op_write) ?
					     DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
		if (op->op_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->op_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = DIV_ROUND_UP(op->op_count, max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;

	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		send->s_op = NULL;

		if (!op->op_notify)
			nr_sig += rds_ib_set_wr_signal_state(ic, send,
							     op->op_notify);

		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_rdma_wr.remote_addr = remote_addr;
		send->s_rdma_wr.rkey = op->op_rkey;

		if (num_sge > max_sge) {
			send->s_rdma_wr.wr.num_sge = max_sge;
			num_sge -= max_sge;
		} else {
			send->s_rdma_wr.wr.num_sge = num_sge;
		}

		send->s_rdma_wr.wr.next = NULL;

		if (prev)
			prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;

		for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
		     scat != &op->op_sg[op->op_count]; j++) {
			len = sg_dma_len(scat);
			send->s_sge[j].addr = sg_dma_address(scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_rdma_wr.wr,
			 send->s_rdma_wr.wr.num_sge,
			 send->s_rdma_wr.wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* give a reference to the last op */
	if (scat == &op->op_sg[op->op_count]) {
		prev->s_op = op;
		rds_message_addref(container_of(op, struct rds_message, rdma));
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &first->s_rdma_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_rdma_wr.wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	}

out:
	return ret;
}
void rds_ib_xmit_path_complete(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}