// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support the IPV6_V6ONLY socket option,
 *	Alexey Kuznetsov		which allows both IPv4 and IPv6 sockets
 *					to bind a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}
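
/* Background note: secure_tcp_seq() implements RFC 6528 style initial
 * sequence numbers, roughly ISN = M + F(saddr, daddr, sport, dport, key)
 * where M is a clock component and F a keyed hash of the 4-tuple.  ISNs
 * are therefore unpredictable to off-path attackers while still moving
 * forward over time for a given 4-tuple; the timestamp offset from
 * secure_tcp_ts_off() randomizes TSval per peer the same way.
 */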

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;

		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache
	   is held not per host, but per port pair, and the TW bucket is used
	   as state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && time_after32(ktime_get_seconds(),
					    tcptw->tw_ts_recent_stamp)))) {
		/* In case of repair and re-using TIME-WAIT sockets we still
		 * want to be sure that it is safe as above but honor the
		 * sequence numbers and time stamps set as part of the repair
		 * process.
		 *
		 * Without this check re-using a TIME-WAIT socket with TCP
		 * repair would accumulate a -1 on the repair assigned
		 * sequence number. The first time it is reused the sequence
		 * is -1, the second time -2, etc. This fixes that issue
		 * without appearing to create any others.
		 */
		if (likely(!tp->repair)) {
			u32 seq = tcptw->tw_snd_nxt + 65535 + 2;

			if (!seq)
				seq = 1;
			WRITE_ONCE(tp->write_seq, seq);
			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		}
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
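
/* Worked example for the sequence bump in tcp_twsk_unique() above
 * (illustrative): with tw_snd_nxt == 0xFFFF0000, the new incarnation
 * starts at 0xFFFF0000 + 65535 + 2 == 0x00000001 after the u32 wrap,
 * i.e. strictly past anything the old connection could have sent into
 * a maximal unscaled 64KB window.  The trailing "seq = 1" guard exists
 * because write_seq == 0 means "not yet chosen" to tcp_v4_connect(),
 * which would then pick a fresh random ISN instead.
 */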

static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent the BPF program called below from accessing bytes outside
	 * of the bound specified by the user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			WRITE_ONCE(tp->write_seq, 0);
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and do not release the socket
	 * lock; we select a source port, enter ourselves into the hash
	 * tables and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcp_seq(inet->inet_saddr,
						  inet->inet_daddr,
						  inet->inet_sport,
						  usin->sin_port));
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = prandom_u32();

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
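
/* Userspace view (illustrative sketch, not kernel code): the path above
 * runs under the socket lock when an application does
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(443),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * -EAFNOSUPPORT above corresponds to a sockaddr whose sin_family is not
 * AF_INET, and -ENETUNREACH to a destination without a usable unicast
 * route.
 */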

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember the soft error
	 * in case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
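
/* Worked example (illustrative): a connection negotiated its MSS for a
 * 1500 byte path (icsk_pmtu_cookie == 1500, MSS 1460 without options).
 * An ICMP_FRAG_NEEDED quoting mtu == 1400 arrives; since 1500 > 1400,
 * tcp_sync_mss() recomputes the MSS to 1360 (1400 minus the 20 byte IP
 * header and 20 byte TCP header) and tcp_simple_retransmit() resends the
 * too-big segments immediately instead of waiting for the RTO.
 */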

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return -ENOENT;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
				     type == ICMP_TIME_EXCEEDED ||
				     (type == ICMP_DEST_UNREACH &&
				      (code == ICMP_NET_UNREACH ||
				       code == ICMP_HOST_UNREACH)));
		return 0;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576 bytes, so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		skb = tcp_rtx_queue_head(sk);
		if (WARN_ON_ONCE(!skb))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * has already been accepted it is treated as a connected one
		 * below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering only PROTO_UNREACH and
	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages have
	 * finally lost their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
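
/* Offload note: __tcp_v4_send_check() does not compute the full checksum.
 * th->check is only seeded with the complemented pseudo-header sum
 * (~tcp_v4_check(len, saddr, daddr, 0)); csum_start and csum_offset then
 * tell CHECKSUM_PARTIAL capable hardware (or skb_checksum_help() as the
 * software fallback) where to start folding in the one's complement sum
 * of the TCP header and payload, and where to store the result.
 */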

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused an RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP. So we build the reply based only on the
 *		parameters that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	u64 transmit_time = 0;
	struct sock *ctl_sk;
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash with
		 * the found key; no RST is generated if the md5 hash doesn't
		 * match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     tcp_v4_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When the socket is gone, all binding information is lost and
	 * routing might fail in this case. No choice here: if we choose to
	 * force the input interface, we will misroute in case of an
	 * asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
	if (sk) {
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_priority : sk->sk_priority;
		transmit_time = tcp_transmit_time(sk);
	}
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len,
			      transmit_time);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
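
/* RFC 793 reset generation, as used in tcp_v4_send_reset() above: if the
 * offending segment carried an ACK, the RST reuses that ACK value as its
 * SEQ and needs no ACK of its own; otherwise the RST must acknowledge
 * exactly what arrived, SEG.SEQ + SEG.LEN, where SYN and FIN each count
 * as one octet of sequence space.  Example (illustrative): a stray SYN
 * with seq == 1000 and no data yields an RST with ack_seq == 1001.
 */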

/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states
 * outside socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;
	struct sock *ctl_sk;
	u64 transmit_time;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_priority : sk->sk_priority;
	transmit_time = tcp_transmit_time(sk);
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len,
			      transmit_time);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
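
/* Example of the Rcv.Wind.Shift rule (illustrative): with a 1 MB receive
 * window and rcv_wscale == 6, the 16-bit window field carries
 * 1048576 >> 6 == 16384, and the peer scales it back up on receipt.
 * Only <SYN> segments, which negotiate the option, are sent unshifted.
 */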

/*
 * Send a SYN-ACK after having received a SYN.
 * This still operates on a request_sock only, not on a big
 * socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		rcu_read_lock();
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    rcu_dereference(ireq->ireq_opt));
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
EXPORT_SYMBOL(tcp_md5_needed);

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
					   const union tcp_md5_addr *addr,
					   int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;

		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(__tcp_md5_do_lookup);
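
/* Longest-prefix semantics (illustrative): with keys installed for
 * 10.0.0.0/8 and 10.1.0.0/16, a lookup for peer 10.1.2.3 matches both
 * entries but returns the /16 key, since best_match is only replaced by
 * a strictly larger prefixlen.  A /32 key therefore always beats any
 * covering subnet key for its exact address.
 */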

static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 prefixlen = 32;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET, prefixlen);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
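
/* Userspace configuration (illustrative sketch): the command parsed above
 * is built with the uapi struct tcp_md5sig, e.g.
 *
 *	struct tcp_md5sig md5 = {};
 *	struct sockaddr_in *a = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	a->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &a->sin_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key; TCP_MD5SIG_EXT with
 * TCP_MD5SIG_FLAG_PREFIX set additionally honours tcpm_prefixlen.
 */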

static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
#endif
	return false;
}

static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = sock_net(sk_listener);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet_csk_route_req(sk, &fl->u.ip4, req);
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
	.send_synack	=	tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer SYNs sent to broadcast or multicast addresses */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);

/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = rcu_dereference(ireq->ireq_opt);
	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = prandom_u32();

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (likely(*own_req)) {
		tcp_move_syn(newtp, req);
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
	}
	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	newinet->inet_opt = NULL;
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
				    &tcp_request_sock_ipv4_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v4_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

int tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return 0;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return 0;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return 0;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif, inet_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
	return 0;
}
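
/* Early demux trades one extra hashtable lookup per packet for skipping
 * the routing decision on established flows: when the lookup hits, the
 * dst cached in sk->sk_rx_dst is attached to the skb and ip_rcv_finish()
 * needs no FIB lookup.  It can be disabled system-wide with
 * "sysctl -w net.ipv4.ip_early_demux=0", which may help forwarding-heavy
 * workloads where most traffic is not locally terminated.
 */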

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
	struct skb_shared_info *shinfo;
	const struct tcphdr *th;
	struct tcphdr *thtail;
	struct sk_buff *tail;
	unsigned int hdrlen;
	bool fragstolen;
	u32 gso_segs;
	int delta;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);

	skb_dst_drop(skb);

	if (unlikely(tcp_checksum_complete(skb))) {
		bh_unlock_sock(sk);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
		return true;
	}

	/* Attempt coalescing to last skb in backlog, even if we are
	 * above the limits.
	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
	 */
	th = (const struct tcphdr *)skb->data;
	hdrlen = th->doff * 4;
	shinfo = skb_shinfo(skb);

	if (!shinfo->gso_size)
		shinfo->gso_size = skb->len - hdrlen;

	if (!shinfo->gso_segs)
		shinfo->gso_segs = 1;

	tail = sk->sk_backlog.tail;
	if (!tail)
		goto no_coalesce;
	thtail = (struct tcphdr *)tail->data;

	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
	    ((TCP_SKB_CB(tail)->tcp_flags |
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
	    !((TCP_SKB_CB(tail)->tcp_flags &
	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
	    ((TCP_SKB_CB(tail)->tcp_flags ^
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
#ifdef CONFIG_TLS_DEVICE
	    tail->decrypted != skb->decrypted ||
#endif
	    thtail->doff != th->doff ||
	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
		goto no_coalesce;

	__skb_pull(skb, hdrlen);
	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
		thtail->window = th->window;

		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;

		if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;

		/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
		 * thtail->fin, so that the fast path in tcp_rcv_established()
		 * is not entered if we append a packet with a FIN.
		 * SYN, RST, URG are not present.
		 * ACK is set on both packets.
		 * PSH : we do not really care in TCP stack,
		 *       at least for 'GRO' packets.
		 */
		thtail->fin |= th->fin;
		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;

		if (TCP_SKB_CB(skb)->has_rxtstamp) {
			TCP_SKB_CB(tail)->has_rxtstamp = true;
			tail->tstamp = skb->tstamp;
			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
		}

		/* Not as strict as GRO. We only need to carry mss max value */
		skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
						 skb_shinfo(tail)->gso_size);

		gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
		skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);

		sk->sk_backlog.len += delta;
		__NET_INC_STATS(sock_net(sk),
				LINUX_MIB_TCPBACKLOGCOALESCE);
		kfree_skb_partial(skb, fragstolen);
		return false;
	}
	__skb_push(skb, hdrlen);

no_coalesce:
	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
	limit += 64*1024;

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);
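
/* Sizing sketch (illustrative numbers, actual buffers auto-tune): with
 * sk_rcvbuf == 131072 and sk_sndbuf == 16384 (typical tcp_rmem[1] and
 * tcp_wmem[1] defaults), the drop threshold used above is
 * 131072 + 16384 + 64*1024 == 212992 bytes of backlog truesize.  The
 * extra 64KB exists because only the socket owner can collapse or prune
 * the receive queues to recover memory.
 */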

int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;

	return sk_filter_trim_cap(sk, skb, th->doff * 4);
}
EXPORT_SYMBOL(tcp_filter);

static void tcp_v4_restore_cb(struct sk_buff *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct inet_skb_parm));
}

static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IPCB to its correct location inside
	 * TCP_SKB_CB(). barrier() makes sure the compiler won't play
	 * fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sk_buff *skb_to_free;
	int sdif = inet_sdif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, sdif, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			iph = ip_hdr(skb);
			tcp_v4_fill_cb(skb, iph, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v4_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v4_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset_ct(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	tcp_v4_fill_cb(skb, iph, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		skb_to_free = sk->sk_rx_skb_cache;
		sk->sk_rx_skb_cache = NULL;
		ret = tcp_v4_do_rcv(sk, skb);
	} else {
		if (tcp_add_backlog(sk, skb))
			goto discard_and_relse;
		skb_to_free = NULL;
	}
	bh_unlock_sock(sk);
	if (skb_to_free)
		__kfree_skb(skb_to_free);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb),
							sdif);
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			tcp_v4_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	trace_tcp_destroy_sock(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(rcu_access_pointer(tp->fastopen_rsk));

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_fastopen_destroy_cipher(sk);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get the next listener socket following cur.  If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
get_sk:
	sk_for_each_from(sk) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == afinfo->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
2202 * If st->bucket is zero, the very first socket in the hash is returned.
2204 static void *established_get_first(struct seq_file *seq)
2206 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2207 struct tcp_iter_state *st = seq->private;
2208 struct net *net = seq_file_net(seq);
2212 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2214 struct hlist_nulls_node *node;
2215 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2217 /* Lockless fast path for the common case of empty buckets */
2218 if (empty_bucket(st))
2222 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2223 if (sk->sk_family != afinfo->family ||
2224 !net_eq(sock_net(sk), net)) {
2230 spin_unlock_bh(lock);
2236 static void *established_get_next(struct seq_file *seq, void *cur)
2238 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2239 struct sock *sk = cur;
2240 struct hlist_nulls_node *node;
2241 struct tcp_iter_state *st = seq->private;
2242 struct net *net = seq_file_net(seq);
2247 sk = sk_nulls_next(sk);
2249 sk_nulls_for_each_from(sk, node) {
2250 if (sk->sk_family == afinfo->family &&
2251 net_eq(sock_net(sk), net))
2255 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2257 return established_get_first(seq);
2260 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2262 struct tcp_iter_state *st = seq->private;
2266 rc = established_get_first(seq);
2269 rc = established_get_next(seq, rc);

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_start);

void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_next);

void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
EXPORT_SYMBOL(tcp_seq_stop);
2395 static void get_openreq4(const struct request_sock *req,
2396 struct seq_file *f, int i)
2398 const struct inet_request_sock *ireq = inet_rsk(req);
2399 long delta = req->rsk_timer.expires - jiffies;
2401 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2402 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2407 ntohs(ireq->ir_rmt_port),
2409 0, 0, /* could print option size, but that is af dependent. */
2410 1, /* timers active (only the expire timer) */
2411 jiffies_delta_to_clock_t(delta),
2413 from_kuid_munged(seq_user_ns(f),
2414 sock_i_uid(req->rsk_listener)),
2415 0, /* non standard timer */
2416 0, /* open_requests have no inode */
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = READ_ONCE(sk->sk_ack_backlog);
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
				      READ_ONCE(tp->copied_seq), 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		READ_ONCE(tp->write_seq) - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

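/* TIME_WAIT sockets keep no queue or retransmission state, so most
 * columns are printed as constants; the "3" in the timer column is the
 * conventional code for the TIME_WAIT death timer.
 */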
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

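/* Dispatch on sk->sk_state: TCP_TIME_WAIT and TCP_NEW_SYN_RECV entries
 * are mini sockets (inet_timewait_sock and request_sock) that only
 * share the leading struct sock_common fields with full sockets, so
 * each needs its own formatter.
 */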
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

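/* /proc/net/tcp is created per network namespace, so each netns sees
 * only its own sockets.  The tcp4_seq_afinfo cookie stashed in the
 * proc entry is what the iterators read back to filter the hash
 * tables by address family.
 */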
static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

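/* The AF_INET stream protocol descriptor: the socket layer dispatches
 * every TCP-related system call on a SOCK_STREAM socket through the
 * handlers wired up below.
 */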
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

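/* Per-netns setup and teardown.  Each CPU gets a kernel control socket
 * (net->ipv4.tcp_sk) that the stack uses to transmit RSTs and ACKs on
 * behalf of sockets it does not own, e.g. from tcp_v4_send_reset().
 */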
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	if (net->ipv4.tcp_congestion_control)
		module_put(net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

		/* Please enforce IP_DF and IPID==0 for RST and
		 * ACK sent in SYN-RECV and TIME-WAIT state.
		 */
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

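	/* Namespace-local defaults for the tcp_* sysctls; the values
	 * mirror the historical system-wide defaults.
	 */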
	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
	net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of 16 TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

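/* Batched namespace teardown: purging TIME_WAIT sockets once per exit
 * batch avoids rescanning the established hash for every dying
 * namespace individually.
 */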
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}