/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/rds.h>

#include "rds.h"
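
/*
 * Initialize a freshly allocated incoming message: take the initial
 * reference and record the conn and source address so the rx path can
 * account for and deliver it later.
 */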
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr)
{
	int i;

	refcount_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = conn;
	inc->i_saddr = saddr;
	inc->i_rdma_cookie = 0;
	inc->i_rx_tstamp.tv_sec = 0;
	inc->i_rx_tstamp.tv_usec = 0;

	for (i = 0; i < RDS_RX_MAX_TRACES; i++)
		inc->i_rx_lat_trace[i] = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_init);
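
/*
 * Multipath-capable transports receive on a specific conn path; record
 * it alongside the owning conn.
 */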
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
		       __be32 saddr)
{
	refcount_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = cp->cp_conn;
	inc->i_conn_path = cp;
	inc->i_saddr = saddr;
	inc->i_rdma_cookie = 0;
	inc->i_rx_tstamp.tv_sec = 0;
	inc->i_rx_tstamp.tv_usec = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);
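
/*
 * Incomings are refcounted: the rx path holds a reference while a
 * message sits on a socket's receive queue, and recvmsg() holds another
 * while copying it out.  The transport's inc_free() runs when the last
 * reference is dropped.
 */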
static void rds_inc_addref(struct rds_incoming *inc)
{
	rdsdebug("addref inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
	refcount_inc(&inc->i_refcount);
}

void rds_inc_put(struct rds_incoming *inc)
{
	rdsdebug("put inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
	if (refcount_dec_and_test(&inc->i_refcount)) {
		BUG_ON(!list_empty(&inc->i_item));

		inc->i_conn->c_trans->inc_free(inc);
	}
}
EXPORT_SYMBOL_GPL(rds_inc_put);
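
/*
 * Track the bytes queued on a socket and update the congestion map:
 * set the port's bit when the queue exceeds the socket's rcvbuf, and
 * clear it once the queue drains below half of it.
 */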
static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
				  struct rds_cong_map *map,
				  int delta, __be16 port)
{
	int now_congested;

	if (delta == 0)
		return;

	rs->rs_rcv_bytes += delta;
	if (delta > 0)
		rds_stats_add(s_recv_bytes_added_to_socket, delta);
	else
		rds_stats_add(s_recv_bytes_removed_from_socket, -delta);

	/* loop transport doesn't send/recv congestion updates */
	if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
		return;

	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
		 "now_cong %d delta %d\n",
		 rs, &rs->rs_bound_addr,
		 ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
		 rds_sk_rcvbuf(rs), now_congested, delta);

	/* wasn't -> am congested */
	if (!rs->rs_congested && now_congested) {
		rs->rs_congested = 1;
		rds_cong_set_bit(map, port);
		rds_cong_queue_updates(map);
	}
	/* was -> aren't congested */
	/* Require more free space before reporting uncongested to prevent
	   bouncing cong/uncong state too often */
	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs) / 2))) {
		rs->rs_congested = 0;
		rds_cong_clear_bit(map, port);
		rds_cong_queue_updates(map);
	}

	/* do nothing if no change in cong state */
}
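
/*
 * A changed peer generation number means the peer's rds-tcp instance
 * restarted since we last spoke: reset each path's tx/rx sequence
 * numbers and flag every queued message for flush so stale
 * retransmissions aren't delivered against the new incarnation.
 */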
static void rds_conn_peer_gen_update(struct rds_connection *conn,
				     u32 peer_gen_num)
{
	int i;
	struct rds_message *rm, *tmp;
	unsigned long flags;

	WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP);
	if (peer_gen_num != 0) {
		if (conn->c_peer_gen_num != 0 &&
		    peer_gen_num != conn->c_peer_gen_num) {
			for (i = 0; i < RDS_MPATH_WORKERS; i++) {
				struct rds_conn_path *cp;

				cp = &conn->c_path[i];
				spin_lock_irqsave(&cp->cp_lock, flags);
				cp->cp_next_tx_seq = 1;
				cp->cp_next_rx_seq = 0;
				list_for_each_entry_safe(rm, tmp,
							 &cp->cp_send_queue,
							 m_conn_item) {
					set_bit(RDS_MSG_FLUSH, &rm->m_flags);
				}
				spin_unlock_irqrestore(&cp->cp_lock, flags);
			}
		}
		conn->c_peer_gen_num = peer_gen_num;
	}
}

/*
 * Process all extension headers that come with this message.
 */
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
	struct rds_header *hdr = &inc->i_hdr;
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		struct rds_ext_header_rdma rdma;
		struct rds_ext_header_rdma_dest rdma_dest;
	} buffer;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_RDMA:
			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
			break;

		case RDS_EXTHDR_RDMA_DEST:
			/* We ignore the size for now. We could stash it
			 * somewhere and use it for error checking. */
			inc->i_rdma_cookie = rds_rdma_make_cookie(
					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));

			break;
		}
	}
}
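
/*
 * Handshake probes (pings and pongs on RDS_FLAG_PROBE_PORT) carry the
 * peer's path count and generation number as extension headers; parse
 * them to size c_npaths and to detect a restarted peer.
 */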
static void rds_recv_hs_exthdrs(struct rds_header *hdr,
				struct rds_connection *conn)
{
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		u16 rds_npaths;
		u32 rds_gen_num;
	} buffer;
	u32 new_peer_gen_num = 0;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_NPATHS:
			conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
					       be16_to_cpu(buffer.rds_npaths));
			break;
		case RDS_EXTHDR_GEN_NUM:
			new_peer_gen_num = be32_to_cpu(buffer.rds_gen_num);
			break;
		default:
			pr_warn_ratelimited("ignoring unknown exthdr type "
					    "0x%x\n", type);
		}
	}
	/* if RDS_EXTHDR_NPATHS was not found, default to a single path */
	conn->c_npaths = max_t(int, conn->c_npaths, 1);
	conn->c_ping_triggered = 0;
	rds_conn_peer_gen_update(conn, new_peer_gen_num);
}

/* rds_start_mprds() will synchronously start multiple paths when appropriate.
 * The scheme is based on the following rules:
 *
 * 1. rds_sendmsg on first connect attempt sends the probe ping, with the
 *    sender's npaths (s_npaths)
 * 2. rcvr of probe-ping knows the mprds_paths = min(s_npaths, r_npaths). It
 *    sends back a probe-pong with r_npaths. After that, if rcvr is the
 *    smaller ip addr, it starts rds_conn_path_connect_if_down on all
 *    mprds_paths
 * 3. sender gets woken up, and can move to rds_conn_path_connect_if_down.
 *    If it is the smaller ipaddr, rds_conn_path_connect_if_down can be
 *    called after reception of the probe-pong on all mprds_paths.
 *    Otherwise (sender of probe-ping is not the smaller ip addr): just call
 *    rds_conn_path_connect_if_down on the hashed path. (see rule 4)
 * 4. rds_connect_worker must only trigger a connection if laddr < faddr.
 * 5. sender may end up queuing the packet on the cp; it will be sent out
 *    later, when the connection is completed.
 */
static void rds_start_mprds(struct rds_connection *conn)
{
	int i;
	struct rds_conn_path *cp;

	if (conn->c_npaths > 1 &&
	    IS_CANONICAL(conn->c_laddr, conn->c_faddr)) {
		for (i = 0; i < conn->c_npaths; i++) {
			cp = &conn->c_path[i];
			rds_conn_path_connect_if_down(cp);
		}
	}
}

/*
 * The transport must make sure that this is serialized against other
 * rx and conn reset on this specific conn.
 *
 * We currently assert that only one fragmented message will be sent
 * down a connection at a time.  This lets us reassemble in the conn
 * instead of per-flow, which means that we don't have to go digging through
 * flows to tear down partial reassembly progress on conn failure, and
 * we save flow lookup and locking for each frag arrival.  It does mean
 * that small messages will wait behind large ones.  Fragmenting at all
 * is only to reduce the memory consumption of pre-posted buffers.
 *
 * The caller passes in saddr and daddr instead of us getting them from the
 * conn.  This lets loopback, which only has one conn for both directions,
 * tell us which roles the addrs in the conn are playing for this message.
 */
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp)
{
	struct rds_sock *rs = NULL;
	struct sock *sk;
	unsigned long flags;
	struct rds_conn_path *cp;

	inc->i_conn = conn;
	inc->i_rx_jiffies = jiffies;
	if (conn->c_trans->t_mp_capable)
		cp = inc->i_conn_path;
	else
		cp = &conn->c_path[0];

	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
		 "flags 0x%x rx_jiffies %lu\n", conn,
		 (unsigned long long)cp->cp_next_rx_seq,
		 inc,
		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
		 be32_to_cpu(inc->i_hdr.h_len),
		 be16_to_cpu(inc->i_hdr.h_sport),
		 be16_to_cpu(inc->i_hdr.h_dport),
		 inc->i_hdr.h_flags,
		 inc->i_rx_jiffies);

	/*
	 * Sequence numbers should only increase.  Messages get their
	 * sequence number as they're queued in a sending conn.  They
	 * can be dropped, though, if the sending socket is closed before
	 * they hit the wire.  So sequence numbers can skip forward
	 * under normal operation.  They can also drop back in the conn
	 * failover case as previously sent messages are resent down the
	 * new instance of a conn.  We drop those, otherwise we have
	 * to assume that the next valid seq does not come after a
	 * hole in the fragment stream.
	 *
	 * The headers don't give us a way to realize if fragments of
	 * a message have been dropped.  We assume that frags that arrive
	 * to a flow are part of the current message on the flow that is
	 * being reassembled.  This means that senders can't drop messages
	 * from the sending conn until all their frags are sent.
	 *
	 * XXX we could spend more on the wire to get more robust failure
	 * detection, arguably worth it to avoid data corruption.
	 */
	if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
		rds_stats_inc(s_recv_drop_old_seq);
		goto out;
	}
	cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;

	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
		if (inc->i_hdr.h_sport == 0) {
			rdsdebug("ignore ping with 0 sport from 0x%x\n", saddr);
			goto out;
		}
		rds_stats_inc(s_recv_ping);
		rds_send_pong(cp, inc->i_hdr.h_sport);
		/* if this is a handshake ping, start multipath if necessary */
		if (RDS_HS_PROBE(be16_to_cpu(inc->i_hdr.h_sport),
				 be16_to_cpu(inc->i_hdr.h_dport))) {
			rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
			rds_start_mprds(cp->cp_conn);
		}
		goto out;
	}

	if (be16_to_cpu(inc->i_hdr.h_dport) == RDS_FLAG_PROBE_PORT &&
	    inc->i_hdr.h_sport == 0) {
		rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
		/* if this is a handshake pong, start multipath if necessary */
		rds_start_mprds(cp->cp_conn);
		wake_up(&cp->cp_conn->c_hs_waitq);
		goto out;
	}

	rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
	if (!rs) {
		rds_stats_inc(s_recv_drop_no_sock);
		goto out;
	}

	/* Process extension headers */
	rds_recv_incoming_exthdrs(inc, rs);

	/* We can be racing with rds_release() which marks the socket dead. */
	sk = rds_rs_to_sk(rs);

	/* serialize with rds_release -> sock_orphan */
	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!sock_flag(sk, SOCK_DEAD)) {
		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
		rds_stats_inc(s_recv_queued);
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		if (sock_flag(sk, SOCK_RCVTSTAMP))
			do_gettimeofday(&inc->i_rx_tstamp);
		rds_inc_addref(inc);
		inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
		__rds_wake_sk_sleep(sk);
	} else {
		rds_stats_inc(s_recv_drop_dead_sock);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
	if (rs)
		rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);

/*
 * Be very careful here.  Because this is called as the condition in
 * wait_event_*(), it needs to cope with being called many times.
 */
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
	unsigned long flags;

	if (!*inc) {
		read_lock_irqsave(&rs->rs_recv_lock, flags);
		if (!list_empty(&rs->rs_recv_queue)) {
			*inc = list_entry(rs->rs_recv_queue.next,
					  struct rds_incoming,
					  i_item);
			rds_inc_addref(*inc);
		}
		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
	}

	return *inc != NULL;
}
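
/*
 * Returns nonzero if inc is still on the socket's receive queue.  When
 * drop is set (i.e. not MSG_PEEK), the message is also dequeued and the
 * rcvbuf accounting unwound, all under rs_recv_lock.
 */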
static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
			    int drop)
{
	struct sock *sk = rds_rs_to_sk(rs);
	int ret = 0;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!list_empty(&inc->i_item)) {
		ret = 1;
		if (drop) {
			/* XXX make sure this i_conn is reliable */
			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
					      -be32_to_cpu(inc->i_hdr.h_len),
					      inc->i_hdr.h_dport);
			list_del_init(&inc->i_item);
			rds_inc_put(inc);
		}
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
	return ret;
}

/*
 * Pull notifications off the socket's notification queue.
 * If msghdr is NULL, we will just purge the queue.
 */
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
	struct rds_notifier *notifier;
	struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
	unsigned int count = 0, max_messages = ~0U;
	unsigned long flags;
	LIST_HEAD(copy);
	int err = 0;

	/* put_cmsg copies to user space and thus may sleep. We can't do this
	 * with rs_lock held, so first grab as many notifications as we can stuff
	 * in the user provided cmsg buffer. We don't try to copy more, to avoid
	 * losing notifications - except when the buffer is so small that it
	 * wouldn't even hold a single notification. Then we give the caller as
	 * much of this single msg as we can squeeze in, and set MSG_CTRUNC.
	 */
	if (msghdr) {
		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
		if (!max_messages)
			max_messages = 1;
	}

	spin_lock_irqsave(&rs->rs_lock, flags);
	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
		notifier = list_entry(rs->rs_notify_queue.next,
				      struct rds_notifier, n_list);
		list_move(&notifier->n_list, &copy);
		count++;
	}
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (!count)
		return 0;

	while (!list_empty(&copy)) {
		notifier = list_entry(copy.next, struct rds_notifier, n_list);

		if (msghdr) {
			cmsg.user_token = notifier->n_user_token;
			cmsg.status = notifier->n_status;

			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
				       sizeof(cmsg), &cmsg);
			if (err)
				break;
		}

		list_del_init(&notifier->n_list);
		kfree(notifier);
	}

	/* If we bailed out because of an error in put_cmsg,
	 * we may be left with one or more notifications that we
	 * didn't process. Return them to the head of the list. */
	if (!list_empty(&copy)) {
		spin_lock_irqsave(&rs->rs_lock, flags);
		list_splice(&copy, &rs->rs_notify_queue);
		spin_unlock_irqrestore(&rs->rs_lock, flags);
	}

	return err;
}

/*
 * Deliver a pending congestion notification as a cmsg, then clear the
 * bits that were reported.
 */
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
	uint64_t notify = rs->rs_cong_notify;
	unsigned long flags;
	int err;

	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
		       sizeof(notify), &notify);
	if (err)
		return err;

	spin_lock_irqsave(&rs->rs_lock, flags);
	rs->rs_cong_notify &= ~notify;
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	return 0;
}

/*
 * Receive any control messages.
 */
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
			 struct rds_sock *rs)
{
	int ret = 0;

	if (inc->i_rdma_cookie) {
		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
			       sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
		if (ret)
			goto out;
	}

	if ((inc->i_rx_tstamp.tv_sec != 0) &&
	    sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
		ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
			       sizeof(struct timeval),
			       &inc->i_rx_tstamp);
		if (ret)
			goto out;
	}

	if (rs->rs_rx_traces) {
		struct rds_cmsg_rx_trace t;
		int i, j;

		memset(&t, 0, sizeof(t));
		inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
		t.rx_traces = rs->rs_rx_traces;
		for (i = 0; i < rs->rs_rx_traces; i++) {
			j = rs->rs_rx_trace[i];
			t.rx_trace_pos[i] = j;
			t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
					inc->i_rx_lat_trace[j];
		}

		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
			       sizeof(t), &t);
		if (ret)
			goto out;
	}

out:
	return ret;
}
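
/*
 * Hand one batch of zerocopy completion cookies to the caller via an
 * RDS_CMSG_ZCOPY_COMPLETION cmsg.  If put_cmsg() fails, the batch is
 * put back at the head of the queue so it isn't lost.
 */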
static bool rds_recvmsg_zcookie(struct rds_sock *rs, struct msghdr *msg)
{
	struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
	struct rds_msg_zcopy_info *info = NULL;
	struct rds_zcopy_cookies *done;
	unsigned long flags;

	if (!msg->msg_control)
		return false;

	if (!sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY) ||
	    msg->msg_controllen < CMSG_SPACE(sizeof(*done)))
		return false;

	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&q->zcookie_head)) {
		info = list_entry(q->zcookie_head.next,
				  struct rds_msg_zcopy_info, rs_zcookie_next);
		list_del(&info->rs_zcookie_next);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	if (!info)
		return false;
	done = &info->zcookies;
	if (put_cmsg(msg, SOL_RDS, RDS_CMSG_ZCOPY_COMPLETION, sizeof(*done),
		     done)) {
		spin_lock_irqsave(&q->lock, flags);
		list_add(&info->rs_zcookie_next, &q->zcookie_head);
		spin_unlock_irqrestore(&q->lock, flags);
		return false;
	}
	kfree(info);
	return true;
}
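
/*
 * recvmsg() entry point: deliver pending RDMA and congestion
 * notifications first, then block (subject to the socket's receive
 * timeout) until a message can be copied to the caller.  MSG_PEEK
 * leaves the message queued; a short buffer sets MSG_TRUNC.
 */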
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	long timeo;
	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct rds_incoming *inc = NULL;

	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
	timeo = sock_rcvtimeo(sk, nonblock);

	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);

	if (msg_flags & MSG_OOB)
		goto out;
	if (msg_flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, size, SOL_IP, IP_RECVERR);

	while (1) {
		/* If there are pending notifications, do those - and nothing else */
		if (!list_empty(&rs->rs_notify_queue)) {
			ret = rds_notify_queue_get(rs, msg);
			break;
		}

		if (rs->rs_cong_notify) {
			ret = rds_notify_cong(rs, msg);
			break;
		}

		if (!rds_next_incoming(rs, &inc)) {
			if (nonblock) {
				bool reaped = rds_recvmsg_zcookie(rs, msg);

				ret = reaped ? 0 : -EAGAIN;
				break;
			}

			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					(!list_empty(&rs->rs_notify_queue) ||
					 rs->rs_cong_notify ||
					 rds_next_incoming(rs, &inc)), timeo);
			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
				 timeo);
			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
				continue;

			ret = timeo;
			if (ret == 0)
				ret = -ETIMEDOUT;
			break;
		}

		rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
			 &inc->i_conn->c_faddr,
			 ntohs(inc->i_hdr.h_sport));
		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
		if (ret < 0)
			break;

		/*
		 * If the message we just copied isn't at the head of the
		 * recv queue then someone else raced us to return it, try
		 * to get the next message.
		 */
		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
			rds_inc_put(inc);
			inc = NULL;
			rds_stats_inc(s_recv_deliver_raced);
			iov_iter_revert(&msg->msg_iter, ret);
			continue;
		}

		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
			if (msg_flags & MSG_TRUNC)
				ret = be32_to_cpu(inc->i_hdr.h_len);
			msg->msg_flags |= MSG_TRUNC;
		}

		if (rds_cmsg_recv(inc, msg, rs)) {
			ret = -EFAULT;
			goto out;
		}
		rds_recvmsg_zcookie(rs, msg);

		rds_stats_inc(s_recv_delivered);

		if (sin) {
			sin->sin_family = AF_INET;
			sin->sin_port = inc->i_hdr.h_sport;
			sin->sin_addr.s_addr = inc->i_saddr;
			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
			msg->msg_namelen = sizeof(*sin);
		}
		break;
	}

	if (inc)
		rds_inc_put(inc);

out:
	return ret;
}

/*
 * The socket is being shut down and we're asked to drop messages that were
 * queued for recvmsg.  The caller has unbound the socket so the receive path
 * won't queue any more incoming fragments or messages on the socket.
 */
void rds_clear_recv_queue(struct rds_sock *rs)
{
	struct sock *sk = rds_rs_to_sk(rs);
	struct rds_incoming *inc, *tmp;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      -be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		list_del_init(&inc->i_item);
		rds_inc_put(inc);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}

/*
 * inc->i_saddr isn't used here because it is only set in the receive
 * path.
 */
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip)
{
	struct rds_info_message minfo;

	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
	minfo.len = be32_to_cpu(inc->i_hdr.h_len);

	if (flip) {
		minfo.laddr = daddr;
		minfo.faddr = saddr;
		minfo.lport = inc->i_hdr.h_dport;
		minfo.fport = inc->i_hdr.h_sport;
	} else {
		minfo.laddr = saddr;
		minfo.faddr = daddr;
		minfo.lport = inc->i_hdr.h_sport;
		minfo.fport = inc->i_hdr.h_dport;
	}

	minfo.flags = 0;

	rds_info_copy(iter, &minfo, sizeof(minfo));
}