/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>

#include "info.h"
/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
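
/*
 * Illustrative example (editor's note, not in the original source): the
 * version word packs major in the high byte and minor in the low byte, so
 *
 *	RDS_PROTOCOL_MAJOR(RDS_PROTOCOL_3_1) == 3
 *	RDS_PROTOCOL_MINOR(RDS_PROTOCOL_3_1) == 1
 *	RDS_PROTOCOL(3, 1) == 0x0301 == RDS_PROTOCOL_3_1
 */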
/*
 * XXX randomly chosen, but at least seems to be unused:
 * #		18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unprivileged
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif
#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__, ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif
/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
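
/*
 * Illustrative example (editor's note, not in the original source):
 * ceil() rounds an integer division up, e.g. ceil(10, 4) == 3.  It is the
 * natural fit for "how many RDS_FRAG_SIZE fragments does this payload
 * need", e.g. ceil(9000, 4096) == 3.
 */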
#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)
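
/*
 * Illustrative example (editor's note, not in the original source): the
 * congestion map carries one bit per port for all 65536 ports, i.e. 8KB.
 * With 4KB pages that is RDS_CONG_MAP_PAGES == 2 map pages, and port P
 * lives at page (P / RDS_CONG_MAP_PAGE_BITS),
 * bit (P % RDS_CONG_MAP_PAGE_BITS).
 */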
struct rds_cong_map {
	struct rb_node		m_rb_node;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};
/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};
/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_RECV_REFILL		3
#define RDS_DESTROY_PENDING	4
/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define RDS_MPATH_WORKERS	8
#define RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))
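
/*
 * Illustrative example (editor's note, not in the original source):
 * because n is a power of 2, "& ((n) - 1)" reduces the jhash result
 * modulo n, so RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS) always yields a path
 * index in [0, RDS_MPATH_WORKERS - 1].
 */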
#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))
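
/*
 * Illustrative example (editor's note, not in the original source): only
 * one ordering of an address pair is canonical, so both hosts agree on a
 * single tie-breaker for the pair, e.g. <10.0.0.1, 10.0.0.2> but never
 * <10.0.0.2, 10.0.0.1>.  This is the usual role of such an ordering when
 * both endpoints may act on the same connection at once.
 */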
/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;		/* protect msg queues */
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_index;
};
/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1,
				c_ping_triggered:1,
				c_pad_to_32:30;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	possible_net_t		c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	*c_path;
	wait_queue_head_t	c_hs_waitq; /* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};
static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}
#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255
/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths.  If the peer is a legacy (older kernel revision) peer,
 * it returns a pong message without the additional control information,
 * which tells the sender that the peer is an older rev.
 */
#define RDS_FLAG_PROBE_PORT	1
#define RDS_HS_PROBE(sport, dport) \
		(((sport) == RDS_FLAG_PROBE_PORT && (dport) == 0) || \
		 ((sport) == 0 && (dport) == RDS_FLAG_PROBE_PORT))
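
/*
 * Illustrative example (editor's note, not in the original source): a ping
 * from sport RDS_FLAG_PROBE_PORT to dport 0 is a handshake probe, and so
 * is the pong that comes back from sport 0 to dport RDS_FLAG_PROBE_PORT;
 * RDS_HS_PROBE() matches both directions.
 */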
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};
/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0
/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};
/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};
/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};
/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */
#define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define RDS_MSG_RX_HDR		0
#define RDS_MSG_RX_START	1
#define RDS_MSG_RX_END		2
#define RDS_MSG_RX_CMSG		3
struct rds_incoming {
	refcount_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;

	rds_rdma_cookie_t	i_rdma_cookie;
	struct timeval		i_rx_tstamp;
	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
};
struct rds_mr {
	struct rb_node		r_rb_node;
	refcount_t		r_refcount;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};
/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
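
/*
 * Illustrative round-trip (editor's note, not in the original source):
 * rds_rdma_make_cookie() packs the R_Key into the low 32 bits and the
 * offset into the high 32 bits, so for
 * c = rds_rdma_make_cookie(0x1234, 4096):
 *
 *	rds_rdma_cookie_key(c)    == 0x1234
 *	rds_rdma_cookie_offset(c) == 4096
 */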
/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1
/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8
struct rds_znotifier {
	struct mmpin		z_mmp;
	u32			z_cookie;
};

struct rds_msg_zcopy_info {
	struct list_head rs_zcookie_next;
	union {
		struct rds_znotifier znotif;
		struct rds_zcopy_cookies zcookies;
	};
};
struct rds_msg_zcopy_queue {
	struct list_head zcookie_head;
	spinlock_t lock; /* protects zcookie_head queue */
};

static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->zcookie_head);
}
struct rds_message {
	refcount_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;
	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_notify:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct rds_znotifier	*op_mmp_znotifier;
			struct scatterlist	*op_sg;
		} data;
	};

	struct rds_conn_path *m_conn_path;
};
/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};
/* Available as part of RDS core, so doesn't need to participate
 * in get_preferred transport etc.
 */
#define RDS_TRANS_LOOP	3
/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns, the connection cannot call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with
 *                 the send and connecting paths (xmit_* and conn_*).  The
 *                 transport is responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 */
struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, __be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret,
			struct rds_connection *conn);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
	bool (*t_unloading)(struct rds_connection *conn);
};
struct rds_sock {
	struct sock		rs_sk;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	__be32			rs_bound_addr;
	__be16			rs_bound_port;
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	struct list_head	rs_notify_queue;	/* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;

	/* Socket receive path trace points */
	u8			rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
	struct rds_msg_zcopy_queue rs_zcookie_queue;
};
static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}

static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}
/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead. We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}

static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
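
/*
 * Illustrative example (editor's note, not in the original source): after
 * setsockopt(SO_SNDBUF) with val == 65536 the core sets sk_sndbuf to
 * 131072, so rds_sk_sndbuf() reports the original 65536 bytes of payload
 * budget.
 */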
struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
	uint64_t	s_recv_bytes_added_to_socket;
	uint64_t	s_recv_bytes_removed_from_socket;
};
/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;
/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);
/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
		  struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
/* conn.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    u64 *buffer,
			    size_t item_len);
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)
static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}
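
/*
 * Illustrative example (editor's note, not in the original source): the
 * cmpxchg makes state changes race-free.  Only one caller of
 *
 *	rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING)
 *
 * can win and kick off a connect attempt; a concurrent caller sees a
 * non-RDS_CONN_DOWN state and gets 0 back.
 */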
static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}
/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
			       bool zcopy);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);
void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *info);
static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
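
/*
 * Illustrative example (editor's note, not in the original source):
 * h_csum is zeroed, the 16-bit one's-complement sum is taken over the
 * whole header (sizeof(*hdr) >> 2 32-bit words), and the result is stored
 * back in h_csum.  Re-summing the header with the stored checksum in
 * place then yields 0, which is exactly what the verify helper tests;
 * an all-zero h_csum on the wire means "no checksum".
 */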
/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);
/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);
/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
void rds_send_ping(struct rds_connection *conn, int cp_index);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);
/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);
void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (refcount_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}
static inline bool rds_destroy_pending(struct rds_connection *conn)
{
	return !check_net(rds_conn_net(conn)) ||
	       (conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
}
/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {	\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
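
/*
 * Illustrative example (editor's note, not in the original source):
 * bumping a counter from anywhere in the core is just
 *
 *	rds_stats_inc(s_recv_ping);
 *
 * The macro pins the current CPU with get_cpu(), increments that CPU's
 * copy of the counter, and drops the pin with put_cpu(), so no lock is
 * needed.
 */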
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);
/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;
/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif /* _RDS_RDS_H */