]> asedeno.scripts.mit.edu Git - linux.git/blob - net/ipv4/tcp_ipv4.c
Merge tag 'asoc-fix-v5.2-rc4' of https://git.kernel.org/pub/scm/linux/kernel/git...
[linux.git] / net / ipv4 / tcp_ipv4.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  *              IPv4 specific functions
9  *
10  *
11  *              code split from:
12  *              linux/ipv4/tcp.c
13  *              linux/ipv4/tcp_input.c
14  *              linux/ipv4/tcp_output.c
15  *
16  *              See tcp.c for author information
17  *
18  *      This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23
24 /*
25  * Changes:
26  *              David S. Miller :       New socket lookup architecture.
27  *                                      This code is dedicated to John Dyson.
28  *              David S. Miller :       Change semantics of established hash,
29  *                                      half is devoted to TIME_WAIT sockets
30  *                                      and the rest go in the other half.
31  *              Andi Kleen :            Add support for syncookies and fixed
32  *                                      some bugs: ip options weren't passed to
33  *                                      the TCP layer, missed a check for an
34  *                                      ACK bit.
35  *              Andi Kleen :            Implemented fast path mtu discovery.
36  *                                      Fixed many serious bugs in the
37  *                                      request_sock handling and moved
38  *                                      most of it into the af independent code.
39  *                                      Added tail drop and some other bugfixes.
40  *                                      Added new listen semantics.
41  *              Mike McLagan    :       Routing by source
42  *      Juan Jose Ciarlante:            ip_dynaddr bits
43  *              Andi Kleen:             various fixes.
44  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
45  *                                      coma.
46  *      Andi Kleen              :       Fix new listen.
47  *      Andi Kleen              :       Fix accept error reporting.
48  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
49  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
50  *                                      a single port at the same time.
51  */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/busy_poll.h>
77
78 #include <linux/inet.h>
79 #include <linux/ipv6.h>
80 #include <linux/stddef.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/inetdevice.h>
84
85 #include <crypto/hash.h>
86 #include <linux/scatterlist.h>
87
88 #include <trace/events/tcp.h>
89
90 #ifdef CONFIG_TCP_MD5SIG
91 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
92                                __be32 daddr, __be32 saddr, const struct tcphdr *th);
93 #endif
94
95 struct inet_hashinfo tcp_hashinfo;
96 EXPORT_SYMBOL(tcp_hashinfo);
97
98 static u32 tcp_v4_init_seq(const struct sk_buff *skb)
99 {
100         return secure_tcp_seq(ip_hdr(skb)->daddr,
101                               ip_hdr(skb)->saddr,
102                               tcp_hdr(skb)->dest,
103                               tcp_hdr(skb)->source);
104 }
105
106 static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
107 {
108         return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
109 }
110
111 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
112 {
113         const struct inet_timewait_sock *tw = inet_twsk(sktw);
114         const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
115         struct tcp_sock *tp = tcp_sk(sk);
116         int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;
117
118         if (reuse == 2) {
119                 /* Still does not detect *everything* that goes through
120                  * lo, since we require a loopback src or dst address
121                  * or direct binding to 'lo' interface.
122                  */
123                 bool loopback = false;
124                 if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
125                         loopback = true;
126 #if IS_ENABLED(CONFIG_IPV6)
127                 if (tw->tw_family == AF_INET6) {
128                         if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
129                             (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
130                              (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
131                             ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
132                             (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
133                              (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
134                                 loopback = true;
135                 } else
136 #endif
137                 {
138                         if (ipv4_is_loopback(tw->tw_daddr) ||
139                             ipv4_is_loopback(tw->tw_rcv_saddr))
140                                 loopback = true;
141                 }
142                 if (!loopback)
143                         reuse = 0;
144         }
145
146         /* With PAWS, it is safe from the viewpoint
147            of data integrity. Even without PAWS it is safe provided sequence
148            spaces do not overlap i.e. at data rates <= 80Mbit/sec.
149
150            Actually, the idea is close to VJ's one, only timestamp cache is
151            held not per host, but per port pair and TW bucket is used as state
152            holder.
153
154            If TW bucket has been already destroyed we fall back to VJ's scheme
155            and use initial timestamp retrieved from peer table.
156          */
157         if (tcptw->tw_ts_recent_stamp &&
158             (!twp || (reuse && time_after32(ktime_get_seconds(),
159                                             tcptw->tw_ts_recent_stamp)))) {
160                 /* In case of repair and re-using TIME-WAIT sockets we still
161                  * want to be sure that it is safe as above but honor the
162                  * sequence numbers and time stamps set as part of the repair
163                  * process.
164                  *
165                  * Without this check re-using a TIME-WAIT socket with TCP
166                  * repair would accumulate a -1 on the repair assigned
167                  * sequence number. The first time it is reused the sequence
168                  * is -1, the second time -2, etc. This fixes that issue
169                  * without appearing to create any others.
170                  */
171                 if (likely(!tp->repair)) {
172                         tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
173                         if (tp->write_seq == 0)
174                                 tp->write_seq = 1;
175                         tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
176                         tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
177                 }
178                 sock_hold(sktw);
179                 return 1;
180         }
181
182         return 0;
183 }
184 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
185
186 static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
187                               int addr_len)
188 {
189         /* This check is replicated from tcp_v4_connect() and intended to
190          * prevent BPF program called below from accessing bytes that are out
191          * of the bound specified by user in addr_len.
192          */
193         if (addr_len < sizeof(struct sockaddr_in))
194                 return -EINVAL;
195
196         sock_owned_by_me(sk);
197
198         return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
199 }
200
201 /* This will initiate an outgoing connection. */
202 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
203 {
204         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
205         struct inet_sock *inet = inet_sk(sk);
206         struct tcp_sock *tp = tcp_sk(sk);
207         __be16 orig_sport, orig_dport;
208         __be32 daddr, nexthop;
209         struct flowi4 *fl4;
210         struct rtable *rt;
211         int err;
212         struct ip_options_rcu *inet_opt;
213         struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
214
215         if (addr_len < sizeof(struct sockaddr_in))
216                 return -EINVAL;
217
218         if (usin->sin_family != AF_INET)
219                 return -EAFNOSUPPORT;
220
221         nexthop = daddr = usin->sin_addr.s_addr;
222         inet_opt = rcu_dereference_protected(inet->inet_opt,
223                                              lockdep_sock_is_held(sk));
224         if (inet_opt && inet_opt->opt.srr) {
225                 if (!daddr)
226                         return -EINVAL;
227                 nexthop = inet_opt->opt.faddr;
228         }
229
230         orig_sport = inet->inet_sport;
231         orig_dport = usin->sin_port;
232         fl4 = &inet->cork.fl.u.ip4;
233         rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
234                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
235                               IPPROTO_TCP,
236                               orig_sport, orig_dport, sk);
237         if (IS_ERR(rt)) {
238                 err = PTR_ERR(rt);
239                 if (err == -ENETUNREACH)
240                         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
241                 return err;
242         }
243
244         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
245                 ip_rt_put(rt);
246                 return -ENETUNREACH;
247         }
248
249         if (!inet_opt || !inet_opt->opt.srr)
250                 daddr = fl4->daddr;
251
252         if (!inet->inet_saddr)
253                 inet->inet_saddr = fl4->saddr;
254         sk_rcv_saddr_set(sk, inet->inet_saddr);
255
256         if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
257                 /* Reset inherited state */
258                 tp->rx_opt.ts_recent       = 0;
259                 tp->rx_opt.ts_recent_stamp = 0;
260                 if (likely(!tp->repair))
261                         tp->write_seq      = 0;
262         }
263
264         inet->inet_dport = usin->sin_port;
265         sk_daddr_set(sk, daddr);
266
267         inet_csk(sk)->icsk_ext_hdr_len = 0;
268         if (inet_opt)
269                 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
270
271         tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
272
273         /* Socket identity is still unknown (sport may be zero).
274          * However we set state to SYN-SENT and not releasing socket
275          * lock select source port, enter ourselves into the hash tables and
276          * complete initialization after this.
277          */
278         tcp_set_state(sk, TCP_SYN_SENT);
279         err = inet_hash_connect(tcp_death_row, sk);
280         if (err)
281                 goto failure;
282
283         sk_set_txhash(sk);
284
285         rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
286                                inet->inet_sport, inet->inet_dport, sk);
287         if (IS_ERR(rt)) {
288                 err = PTR_ERR(rt);
289                 rt = NULL;
290                 goto failure;
291         }
292         /* OK, now commit destination to socket.  */
293         sk->sk_gso_type = SKB_GSO_TCPV4;
294         sk_setup_caps(sk, &rt->dst);
295         rt = NULL;
296
297         if (likely(!tp->repair)) {
298                 if (!tp->write_seq)
299                         tp->write_seq = secure_tcp_seq(inet->inet_saddr,
300                                                        inet->inet_daddr,
301                                                        inet->inet_sport,
302                                                        usin->sin_port);
303                 tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
304                                                  inet->inet_saddr,
305                                                  inet->inet_daddr);
306         }
307
308         inet->inet_id = tp->write_seq ^ jiffies;
309
310         if (tcp_fastopen_defer_connect(sk, &err))
311                 return err;
312         if (err)
313                 goto failure;
314
315         err = tcp_connect(sk);
316
317         if (err)
318                 goto failure;
319
320         return 0;
321
322 failure:
323         /*
324          * This unhashes the socket and releases the local port,
325          * if necessary.
326          */
327         tcp_set_state(sk, TCP_CLOSE);
328         ip_rt_put(rt);
329         sk->sk_route_caps = 0;
330         inet->inet_dport = 0;
331         return err;
332 }
333 EXPORT_SYMBOL(tcp_v4_connect);
334
335 /*
336  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
337  * It can be called through tcp_release_cb() if socket was owned by user
338  * at the time tcp_v4_err() was called to handle ICMP message.
339  */
340 void tcp_v4_mtu_reduced(struct sock *sk)
341 {
342         struct inet_sock *inet = inet_sk(sk);
343         struct dst_entry *dst;
344         u32 mtu;
345
346         if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
347                 return;
348         mtu = tcp_sk(sk)->mtu_info;
349         dst = inet_csk_update_pmtu(sk, mtu);
350         if (!dst)
351                 return;
352
353         /* Something is about to be wrong... Remember soft error
354          * for the case, if this connection will not able to recover.
355          */
356         if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
357                 sk->sk_err_soft = EMSGSIZE;
358
359         mtu = dst_mtu(dst);
360
361         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
362             ip_sk_accept_pmtu(sk) &&
363             inet_csk(sk)->icsk_pmtu_cookie > mtu) {
364                 tcp_sync_mss(sk, mtu);
365
366                 /* Resend the TCP packet because it's
367                  * clear that the old packet has been
368                  * dropped. This is the new "fast" path mtu
369                  * discovery.
370                  */
371                 tcp_simple_retransmit(sk);
372         } /* else let the usual retransmit timer handle it */
373 }
374 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
375
376 static void do_redirect(struct sk_buff *skb, struct sock *sk)
377 {
378         struct dst_entry *dst = __sk_dst_check(sk, 0);
379
380         if (dst)
381                 dst->ops->redirect(dst, sk, skb);
382 }
383
384
385 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
386 void tcp_req_err(struct sock *sk, u32 seq, bool abort)
387 {
388         struct request_sock *req = inet_reqsk(sk);
389         struct net *net = sock_net(sk);
390
391         /* ICMPs are not backlogged, hence we cannot get
392          * an established socket here.
393          */
394         if (seq != tcp_rsk(req)->snt_isn) {
395                 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
396         } else if (abort) {
397                 /*
398                  * Still in SYN_RECV, just remove it silently.
399                  * There is no good way to pass the error to the newly
400                  * created socket, and POSIX does not want network
401                  * errors returned from accept().
402                  */
403                 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
404                 tcp_listendrop(req->rsk_listener);
405         }
406         reqsk_put(req);
407 }
408 EXPORT_SYMBOL(tcp_req_err);
409
410 /*
411  * This routine is called by the ICMP module when it gets some
412  * sort of error condition.  If err < 0 then the socket should
413  * be closed and the error returned to the user.  If err > 0
414  * it's just the icmp type << 8 | icmp code.  After adjustment
415  * header points to the first 8 bytes of the tcp header.  We need
416  * to find the appropriate port.
417  *
418  * The locking strategy used here is very "optimistic". When
419  * someone else accesses the socket the ICMP is just dropped
420  * and for some paths there is no check at all.
421  * A more general error queue to queue errors for later handling
422  * is probably better.
423  *
424  */
425
426 int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
427 {
428         const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
429         struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
430         struct inet_connection_sock *icsk;
431         struct tcp_sock *tp;
432         struct inet_sock *inet;
433         const int type = icmp_hdr(icmp_skb)->type;
434         const int code = icmp_hdr(icmp_skb)->code;
435         struct sock *sk;
436         struct sk_buff *skb;
437         struct request_sock *fastopen;
438         u32 seq, snd_una;
439         s32 remaining;
440         u32 delta_us;
441         int err;
442         struct net *net = dev_net(icmp_skb->dev);
443
444         sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
445                                        th->dest, iph->saddr, ntohs(th->source),
446                                        inet_iif(icmp_skb), 0);
447         if (!sk) {
448                 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
449                 return -ENOENT;
450         }
451         if (sk->sk_state == TCP_TIME_WAIT) {
452                 inet_twsk_put(inet_twsk(sk));
453                 return 0;
454         }
455         seq = ntohl(th->seq);
456         if (sk->sk_state == TCP_NEW_SYN_RECV) {
457                 tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
458                                      type == ICMP_TIME_EXCEEDED ||
459                                      (type == ICMP_DEST_UNREACH &&
460                                       (code == ICMP_NET_UNREACH ||
461                                        code == ICMP_HOST_UNREACH)));
462                 return 0;
463         }
464
465         bh_lock_sock(sk);
466         /* If too many ICMPs get dropped on busy
467          * servers this needs to be solved differently.
468          * We do take care of PMTU discovery (RFC1191) special case :
469          * we can receive locally generated ICMP messages while socket is held.
470          */
471         if (sock_owned_by_user(sk)) {
472                 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
473                         __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
474         }
475         if (sk->sk_state == TCP_CLOSE)
476                 goto out;
477
478         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
479                 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
480                 goto out;
481         }
482
483         icsk = inet_csk(sk);
484         tp = tcp_sk(sk);
485         /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
486         fastopen = tp->fastopen_rsk;
487         snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
488         if (sk->sk_state != TCP_LISTEN &&
489             !between(seq, snd_una, tp->snd_nxt)) {
490                 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
491                 goto out;
492         }
493
494         switch (type) {
495         case ICMP_REDIRECT:
496                 if (!sock_owned_by_user(sk))
497                         do_redirect(icmp_skb, sk);
498                 goto out;
499         case ICMP_SOURCE_QUENCH:
500                 /* Just silently ignore these. */
501                 goto out;
502         case ICMP_PARAMETERPROB:
503                 err = EPROTO;
504                 break;
505         case ICMP_DEST_UNREACH:
506                 if (code > NR_ICMP_UNREACH)
507                         goto out;
508
509                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
510                         /* We are not interested in TCP_LISTEN and open_requests
511                          * (SYN-ACKs send out by Linux are always <576bytes so
512                          * they should go through unfragmented).
513                          */
514                         if (sk->sk_state == TCP_LISTEN)
515                                 goto out;
516
517                         tp->mtu_info = info;
518                         if (!sock_owned_by_user(sk)) {
519                                 tcp_v4_mtu_reduced(sk);
520                         } else {
521                                 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
522                                         sock_hold(sk);
523                         }
524                         goto out;
525                 }
526
527                 err = icmp_err_convert[code].errno;
528                 /* check if icmp_skb allows revert of backoff
529                  * (see draft-zimmermann-tcp-lcd) */
530                 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
531                         break;
532                 if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
533                     !icsk->icsk_backoff || fastopen)
534                         break;
535
536                 if (sock_owned_by_user(sk))
537                         break;
538
539                 skb = tcp_rtx_queue_head(sk);
540                 if (WARN_ON_ONCE(!skb))
541                         break;
542
543                 icsk->icsk_backoff--;
544                 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
545                                                TCP_TIMEOUT_INIT;
546                 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
547
548
549                 tcp_mstamp_refresh(tp);
550                 delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
551                 remaining = icsk->icsk_rto -
552                             usecs_to_jiffies(delta_us);
553
554                 if (remaining > 0) {
555                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
556                                                   remaining, TCP_RTO_MAX);
557                 } else {
558                         /* RTO revert clocked out retransmission.
559                          * Will retransmit now */
560                         tcp_retransmit_timer(sk);
561                 }
562
563                 break;
564         case ICMP_TIME_EXCEEDED:
565                 err = EHOSTUNREACH;
566                 break;
567         default:
568                 goto out;
569         }
570
571         switch (sk->sk_state) {
572         case TCP_SYN_SENT:
573         case TCP_SYN_RECV:
574                 /* Only in fast or simultaneous open. If a fast open socket is
575                  * is already accepted it is treated as a connected one below.
576                  */
577                 if (fastopen && !fastopen->sk)
578                         break;
579
580                 if (!sock_owned_by_user(sk)) {
581                         sk->sk_err = err;
582
583                         sk->sk_error_report(sk);
584
585                         tcp_done(sk);
586                 } else {
587                         sk->sk_err_soft = err;
588                 }
589                 goto out;
590         }
591
592         /* If we've already connected we will keep trying
593          * until we time out, or the user gives up.
594          *
595          * rfc1122 4.2.3.9 allows to consider as hard errors
596          * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
597          * but it is obsoleted by pmtu discovery).
598          *
599          * Note, that in modern internet, where routing is unreliable
600          * and in each dark corner broken firewalls sit, sending random
601          * errors ordered by their masters even this two messages finally lose
602          * their original sense (even Linux sends invalid PORT_UNREACHs)
603          *
604          * Now we are in compliance with RFCs.
605          *                                                      --ANK (980905)
606          */
607
608         inet = inet_sk(sk);
609         if (!sock_owned_by_user(sk) && inet->recverr) {
610                 sk->sk_err = err;
611                 sk->sk_error_report(sk);
612         } else  { /* Only an error on timeout */
613                 sk->sk_err_soft = err;
614         }
615
616 out:
617         bh_unlock_sock(sk);
618         sock_put(sk);
619         return 0;
620 }
621
622 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
623 {
624         struct tcphdr *th = tcp_hdr(skb);
625
626         th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
627         skb->csum_start = skb_transport_header(skb) - skb->head;
628         skb->csum_offset = offsetof(struct tcphdr, check);
629 }
630
631 /* This routine computes an IPv4 TCP checksum. */
632 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
633 {
634         const struct inet_sock *inet = inet_sk(sk);
635
636         __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
637 }
638 EXPORT_SYMBOL(tcp_v4_send_check);
639
640 /*
641  *      This routine will send an RST to the other tcp.
642  *
643  *      Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
644  *                    for reset.
645  *      Answer: if a packet caused RST, it is not for a socket
646  *              existing in our system, if it is matched to a socket,
647  *              it is just duplicate segment or bug in other side's TCP.
648  *              So that we build reply only basing on parameters
649  *              arrived with segment.
650  *      Exception: precedence violation. We do not implement it in any case.
651  */
652
653 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
654 {
655         const struct tcphdr *th = tcp_hdr(skb);
656         struct {
657                 struct tcphdr th;
658 #ifdef CONFIG_TCP_MD5SIG
659                 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
660 #endif
661         } rep;
662         struct ip_reply_arg arg;
663 #ifdef CONFIG_TCP_MD5SIG
664         struct tcp_md5sig_key *key = NULL;
665         const __u8 *hash_location = NULL;
666         unsigned char newhash[16];
667         int genhash;
668         struct sock *sk1 = NULL;
669 #endif
670         struct net *net;
671         struct sock *ctl_sk;
672
673         /* Never send a reset in response to a reset. */
674         if (th->rst)
675                 return;
676
677         /* If sk not NULL, it means we did a successful lookup and incoming
678          * route had to be correct. prequeue might have dropped our dst.
679          */
680         if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
681                 return;
682
683         /* Swap the send and the receive. */
684         memset(&rep, 0, sizeof(rep));
685         rep.th.dest   = th->source;
686         rep.th.source = th->dest;
687         rep.th.doff   = sizeof(struct tcphdr) / 4;
688         rep.th.rst    = 1;
689
690         if (th->ack) {
691                 rep.th.seq = th->ack_seq;
692         } else {
693                 rep.th.ack = 1;
694                 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
695                                        skb->len - (th->doff << 2));
696         }
697
698         memset(&arg, 0, sizeof(arg));
699         arg.iov[0].iov_base = (unsigned char *)&rep;
700         arg.iov[0].iov_len  = sizeof(rep.th);
701
702         net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
703 #ifdef CONFIG_TCP_MD5SIG
704         rcu_read_lock();
705         hash_location = tcp_parse_md5sig_option(th);
706         if (sk && sk_fullsock(sk)) {
707                 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
708                                         &ip_hdr(skb)->saddr, AF_INET);
709         } else if (hash_location) {
710                 /*
711                  * active side is lost. Try to find listening socket through
712                  * source port, and then find md5 key through listening socket.
713                  * we are not loose security here:
714                  * Incoming packet is checked with md5 hash with finding key,
715                  * no RST generated if md5 hash doesn't match.
716                  */
717                 sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
718                                              ip_hdr(skb)->saddr,
719                                              th->source, ip_hdr(skb)->daddr,
720                                              ntohs(th->source), inet_iif(skb),
721                                              tcp_v4_sdif(skb));
722                 /* don't send rst if it can't find key */
723                 if (!sk1)
724                         goto out;
725
726                 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
727                                         &ip_hdr(skb)->saddr, AF_INET);
728                 if (!key)
729                         goto out;
730
731
732                 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
733                 if (genhash || memcmp(hash_location, newhash, 16) != 0)
734                         goto out;
735
736         }
737
738         if (key) {
739                 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
740                                    (TCPOPT_NOP << 16) |
741                                    (TCPOPT_MD5SIG << 8) |
742                                    TCPOLEN_MD5SIG);
743                 /* Update length and the length the header thinks exists */
744                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
745                 rep.th.doff = arg.iov[0].iov_len / 4;
746
747                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
748                                      key, ip_hdr(skb)->saddr,
749                                      ip_hdr(skb)->daddr, &rep.th);
750         }
751 #endif
752         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
753                                       ip_hdr(skb)->saddr, /* XXX */
754                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
755         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
756         arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
757
758         /* When socket is gone, all binding information is lost.
759          * routing might fail in this case. No choice here, if we choose to force
760          * input interface, we will misroute in case of asymmetric route.
761          */
762         if (sk) {
763                 arg.bound_dev_if = sk->sk_bound_dev_if;
764                 if (sk_fullsock(sk))
765                         trace_tcp_send_reset(sk, skb);
766         }
767
768         BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
769                      offsetof(struct inet_timewait_sock, tw_bound_dev_if));
770
771         arg.tos = ip_hdr(skb)->tos;
772         arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
773         local_bh_disable();
774         ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
775         if (sk)
776                 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
777                                    inet_twsk(sk)->tw_mark : sk->sk_mark;
778         ip_send_unicast_reply(ctl_sk,
779                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
780                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
781                               &arg, arg.iov[0].iov_len);
782
783         ctl_sk->sk_mark = 0;
784         __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
785         __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
786         local_bh_enable();
787
788 #ifdef CONFIG_TCP_MD5SIG
789 out:
790         rcu_read_unlock();
791 #endif
792 }
793
794 /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
795    outside socket context is ugly, certainly. What can I do?
796  */
797
798 static void tcp_v4_send_ack(const struct sock *sk,
799                             struct sk_buff *skb, u32 seq, u32 ack,
800                             u32 win, u32 tsval, u32 tsecr, int oif,
801                             struct tcp_md5sig_key *key,
802                             int reply_flags, u8 tos)
803 {
804         const struct tcphdr *th = tcp_hdr(skb);
805         struct {
806                 struct tcphdr th;
807                 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
808 #ifdef CONFIG_TCP_MD5SIG
809                            + (TCPOLEN_MD5SIG_ALIGNED >> 2)
810 #endif
811                         ];
812         } rep;
813         struct net *net = sock_net(sk);
814         struct ip_reply_arg arg;
815         struct sock *ctl_sk;
816
817         memset(&rep.th, 0, sizeof(struct tcphdr));
818         memset(&arg, 0, sizeof(arg));
819
820         arg.iov[0].iov_base = (unsigned char *)&rep;
821         arg.iov[0].iov_len  = sizeof(rep.th);
822         if (tsecr) {
823                 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
824                                    (TCPOPT_TIMESTAMP << 8) |
825                                    TCPOLEN_TIMESTAMP);
826                 rep.opt[1] = htonl(tsval);
827                 rep.opt[2] = htonl(tsecr);
828                 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
829         }
830
831         /* Swap the send and the receive. */
832         rep.th.dest    = th->source;
833         rep.th.source  = th->dest;
834         rep.th.doff    = arg.iov[0].iov_len / 4;
835         rep.th.seq     = htonl(seq);
836         rep.th.ack_seq = htonl(ack);
837         rep.th.ack     = 1;
838         rep.th.window  = htons(win);
839
840 #ifdef CONFIG_TCP_MD5SIG
841         if (key) {
842                 int offset = (tsecr) ? 3 : 0;
843
844                 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
845                                           (TCPOPT_NOP << 16) |
846                                           (TCPOPT_MD5SIG << 8) |
847                                           TCPOLEN_MD5SIG);
848                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
849                 rep.th.doff = arg.iov[0].iov_len/4;
850
851                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
852                                     key, ip_hdr(skb)->saddr,
853                                     ip_hdr(skb)->daddr, &rep.th);
854         }
855 #endif
856         arg.flags = reply_flags;
857         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
858                                       ip_hdr(skb)->saddr, /* XXX */
859                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
860         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
861         if (oif)
862                 arg.bound_dev_if = oif;
863         arg.tos = tos;
864         arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
865         local_bh_disable();
866         ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
867         if (sk)
868                 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
869                                    inet_twsk(sk)->tw_mark : sk->sk_mark;
870         ip_send_unicast_reply(ctl_sk,
871                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
872                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
873                               &arg, arg.iov[0].iov_len);
874
875         ctl_sk->sk_mark = 0;
876         __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
877         local_bh_enable();
878 }
879
880 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
881 {
882         struct inet_timewait_sock *tw = inet_twsk(sk);
883         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
884
885         tcp_v4_send_ack(sk, skb,
886                         tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
887                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
888                         tcp_time_stamp_raw() + tcptw->tw_ts_offset,
889                         tcptw->tw_ts_recent,
890                         tw->tw_bound_dev_if,
891                         tcp_twsk_md5_key(tcptw),
892                         tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
893                         tw->tw_tos
894                         );
895
896         inet_twsk_put(tw);
897 }
898
899 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
900                                   struct request_sock *req)
901 {
902         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
903          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
904          */
905         u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
906                                              tcp_sk(sk)->snd_nxt;
907
908         /* RFC 7323 2.3
909          * The window field (SEG.WND) of every outgoing segment, with the
910          * exception of <SYN> segments, MUST be right-shifted by
911          * Rcv.Wind.Shift bits:
912          */
913         tcp_v4_send_ack(sk, skb, seq,
914                         tcp_rsk(req)->rcv_nxt,
915                         req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
916                         tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
917                         req->ts_recent,
918                         0,
919                         tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
920                                           AF_INET),
921                         inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
922                         ip_hdr(skb)->tos);
923 }
924
925 /*
926  *      Send a SYN-ACK after having received a SYN.
927  *      This still operates on a request_sock only, not on a big
928  *      socket.
929  */
930 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
931                               struct flowi *fl,
932                               struct request_sock *req,
933                               struct tcp_fastopen_cookie *foc,
934                               enum tcp_synack_type synack_type)
935 {
936         const struct inet_request_sock *ireq = inet_rsk(req);
937         struct flowi4 fl4;
938         int err = -1;
939         struct sk_buff *skb;
940
941         /* First, grab a route. */
942         if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
943                 return -1;
944
945         skb = tcp_make_synack(sk, dst, req, foc, synack_type);
946
947         if (skb) {
948                 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
949
950                 rcu_read_lock();
951                 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
952                                             ireq->ir_rmt_addr,
953                                             rcu_dereference(ireq->ireq_opt));
954                 rcu_read_unlock();
955                 err = net_xmit_eval(err);
956         }
957
958         return err;
959 }
960
961 /*
962  *      IPv4 request_sock destructor.
963  */
964 static void tcp_v4_reqsk_destructor(struct request_sock *req)
965 {
966         kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
967 }
968
969 #ifdef CONFIG_TCP_MD5SIG
970 /*
971  * RFC2385 MD5 checksumming requires a mapping of
972  * IP address->MD5 Key.
973  * We need to maintain these in the sk structure.
974  */
975
976 DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
977 EXPORT_SYMBOL(tcp_md5_needed);
978
979 /* Find the Key structure for an address.  */
980 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
981                                            const union tcp_md5_addr *addr,
982                                            int family)
983 {
984         const struct tcp_sock *tp = tcp_sk(sk);
985         struct tcp_md5sig_key *key;
986         const struct tcp_md5sig_info *md5sig;
987         __be32 mask;
988         struct tcp_md5sig_key *best_match = NULL;
989         bool match;
990
991         /* caller either holds rcu_read_lock() or socket lock */
992         md5sig = rcu_dereference_check(tp->md5sig_info,
993                                        lockdep_sock_is_held(sk));
994         if (!md5sig)
995                 return NULL;
996
997         hlist_for_each_entry_rcu(key, &md5sig->head, node) {
998                 if (key->family != family)
999                         continue;
1000
1001                 if (family == AF_INET) {
1002                         mask = inet_make_mask(key->prefixlen);
1003                         match = (key->addr.a4.s_addr & mask) ==
1004                                 (addr->a4.s_addr & mask);
1005 #if IS_ENABLED(CONFIG_IPV6)
1006                 } else if (family == AF_INET6) {
1007                         match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
1008                                                   key->prefixlen);
1009 #endif
1010                 } else {
1011                         match = false;
1012                 }
1013
1014                 if (match && (!best_match ||
1015                               key->prefixlen > best_match->prefixlen))
1016                         best_match = key;
1017         }
1018         return best_match;
1019 }
1020 EXPORT_SYMBOL(__tcp_md5_do_lookup);
1021
1022 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
1023                                                       const union tcp_md5_addr *addr,
1024                                                       int family, u8 prefixlen)
1025 {
1026         const struct tcp_sock *tp = tcp_sk(sk);
1027         struct tcp_md5sig_key *key;
1028         unsigned int size = sizeof(struct in_addr);
1029         const struct tcp_md5sig_info *md5sig;
1030
1031         /* caller either holds rcu_read_lock() or socket lock */
1032         md5sig = rcu_dereference_check(tp->md5sig_info,
1033                                        lockdep_sock_is_held(sk));
1034         if (!md5sig)
1035                 return NULL;
1036 #if IS_ENABLED(CONFIG_IPV6)
1037         if (family == AF_INET6)
1038                 size = sizeof(struct in6_addr);
1039 #endif
1040         hlist_for_each_entry_rcu(key, &md5sig->head, node) {
1041                 if (key->family != family)
1042                         continue;
1043                 if (!memcmp(&key->addr, addr, size) &&
1044                     key->prefixlen == prefixlen)
1045                         return key;
1046         }
1047         return NULL;
1048 }
1049
1050 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1051                                          const struct sock *addr_sk)
1052 {
1053         const union tcp_md5_addr *addr;
1054
1055         addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1056         return tcp_md5_do_lookup(sk, addr, AF_INET);
1057 }
1058 EXPORT_SYMBOL(tcp_v4_md5_lookup);
1059
1060 /* This can be called on a newly created socket, from other files */
1061 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1062                    int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
1063                    gfp_t gfp)
1064 {
1065         /* Add Key to the list */
1066         struct tcp_md5sig_key *key;
1067         struct tcp_sock *tp = tcp_sk(sk);
1068         struct tcp_md5sig_info *md5sig;
1069
1070         key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1071         if (key) {
1072                 /* Pre-existing entry - just update that one. */
1073                 memcpy(key->key, newkey, newkeylen);
1074                 key->keylen = newkeylen;
1075                 return 0;
1076         }
1077
1078         md5sig = rcu_dereference_protected(tp->md5sig_info,
1079                                            lockdep_sock_is_held(sk));
1080         if (!md5sig) {
1081                 md5sig = kmalloc(sizeof(*md5sig), gfp);
1082                 if (!md5sig)
1083                         return -ENOMEM;
1084
1085                 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1086                 INIT_HLIST_HEAD(&md5sig->head);
1087                 rcu_assign_pointer(tp->md5sig_info, md5sig);
1088         }
1089
1090         key = sock_kmalloc(sk, sizeof(*key), gfp);
1091         if (!key)
1092                 return -ENOMEM;
1093         if (!tcp_alloc_md5sig_pool()) {
1094                 sock_kfree_s(sk, key, sizeof(*key));
1095                 return -ENOMEM;
1096         }
1097
1098         memcpy(key->key, newkey, newkeylen);
1099         key->keylen = newkeylen;
1100         key->family = family;
1101         key->prefixlen = prefixlen;
1102         memcpy(&key->addr, addr,
1103                (family == AF_INET6) ? sizeof(struct in6_addr) :
1104                                       sizeof(struct in_addr));
1105         hlist_add_head_rcu(&key->node, &md5sig->head);
1106         return 0;
1107 }
1108 EXPORT_SYMBOL(tcp_md5_do_add);
1109
1110 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1111                    u8 prefixlen)
1112 {
1113         struct tcp_md5sig_key *key;
1114
1115         key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1116         if (!key)
1117                 return -ENOENT;
1118         hlist_del_rcu(&key->node);
1119         atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1120         kfree_rcu(key, rcu);
1121         return 0;
1122 }
1123 EXPORT_SYMBOL(tcp_md5_do_del);
1124
1125 static void tcp_clear_md5_list(struct sock *sk)
1126 {
1127         struct tcp_sock *tp = tcp_sk(sk);
1128         struct tcp_md5sig_key *key;
1129         struct hlist_node *n;
1130         struct tcp_md5sig_info *md5sig;
1131
1132         md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1133
1134         hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1135                 hlist_del_rcu(&key->node);
1136                 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1137                 kfree_rcu(key, rcu);
1138         }
1139 }
1140
1141 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1142                                  char __user *optval, int optlen)
1143 {
1144         struct tcp_md5sig cmd;
1145         struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1146         u8 prefixlen = 32;
1147
1148         if (optlen < sizeof(cmd))
1149                 return -EINVAL;
1150
1151         if (copy_from_user(&cmd, optval, sizeof(cmd)))
1152                 return -EFAULT;
1153
1154         if (sin->sin_family != AF_INET)
1155                 return -EINVAL;
1156
1157         if (optname == TCP_MD5SIG_EXT &&
1158             cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1159                 prefixlen = cmd.tcpm_prefixlen;
1160                 if (prefixlen > 32)
1161                         return -EINVAL;
1162         }
1163
1164         if (!cmd.tcpm_keylen)
1165                 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1166                                       AF_INET, prefixlen);
1167
1168         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1169                 return -EINVAL;
1170
1171         return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1172                               AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
1173                               GFP_KERNEL);
1174 }
1175
1176 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1177                                    __be32 daddr, __be32 saddr,
1178                                    const struct tcphdr *th, int nbytes)
1179 {
1180         struct tcp4_pseudohdr *bp;
1181         struct scatterlist sg;
1182         struct tcphdr *_th;
1183
1184         bp = hp->scratch;
1185         bp->saddr = saddr;
1186         bp->daddr = daddr;
1187         bp->pad = 0;
1188         bp->protocol = IPPROTO_TCP;
1189         bp->len = cpu_to_be16(nbytes);
1190
1191         _th = (struct tcphdr *)(bp + 1);
1192         memcpy(_th, th, sizeof(*th));
1193         _th->check = 0;
1194
1195         sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1196         ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1197                                 sizeof(*bp) + sizeof(*th));
1198         return crypto_ahash_update(hp->md5_req);
1199 }
1200
1201 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1202                                __be32 daddr, __be32 saddr, const struct tcphdr *th)
1203 {
1204         struct tcp_md5sig_pool *hp;
1205         struct ahash_request *req;
1206
1207         hp = tcp_get_md5sig_pool();
1208         if (!hp)
1209                 goto clear_hash_noput;
1210         req = hp->md5_req;
1211
1212         if (crypto_ahash_init(req))
1213                 goto clear_hash;
1214         if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1215                 goto clear_hash;
1216         if (tcp_md5_hash_key(hp, key))
1217                 goto clear_hash;
1218         ahash_request_set_crypt(req, NULL, md5_hash, 0);
1219         if (crypto_ahash_final(req))
1220                 goto clear_hash;
1221
1222         tcp_put_md5sig_pool();
1223         return 0;
1224
1225 clear_hash:
1226         tcp_put_md5sig_pool();
1227 clear_hash_noput:
1228         memset(md5_hash, 0, 16);
1229         return 1;
1230 }
1231
1232 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1233                         const struct sock *sk,
1234                         const struct sk_buff *skb)
1235 {
1236         struct tcp_md5sig_pool *hp;
1237         struct ahash_request *req;
1238         const struct tcphdr *th = tcp_hdr(skb);
1239         __be32 saddr, daddr;
1240
1241         if (sk) { /* valid for establish/request sockets */
1242                 saddr = sk->sk_rcv_saddr;
1243                 daddr = sk->sk_daddr;
1244         } else {
1245                 const struct iphdr *iph = ip_hdr(skb);
1246                 saddr = iph->saddr;
1247                 daddr = iph->daddr;
1248         }
1249
1250         hp = tcp_get_md5sig_pool();
1251         if (!hp)
1252                 goto clear_hash_noput;
1253         req = hp->md5_req;
1254
1255         if (crypto_ahash_init(req))
1256                 goto clear_hash;
1257
1258         if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1259                 goto clear_hash;
1260         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1261                 goto clear_hash;
1262         if (tcp_md5_hash_key(hp, key))
1263                 goto clear_hash;
1264         ahash_request_set_crypt(req, NULL, md5_hash, 0);
1265         if (crypto_ahash_final(req))
1266                 goto clear_hash;
1267
1268         tcp_put_md5sig_pool();
1269         return 0;
1270
1271 clear_hash:
1272         tcp_put_md5sig_pool();
1273 clear_hash_noput:
1274         memset(md5_hash, 0, 16);
1275         return 1;
1276 }
1277 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1278
1279 #endif
1280
1281 /* Called with rcu_read_lock() */
1282 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1283                                     const struct sk_buff *skb)
1284 {
1285 #ifdef CONFIG_TCP_MD5SIG
1286         /*
1287          * This gets called for each TCP segment that arrives
1288          * so we want to be efficient.
1289          * We have 3 drop cases:
1290          * o No MD5 hash and one expected.
1291          * o MD5 hash and we're not expecting one.
1292          * o MD5 hash and its wrong.
1293          */
1294         const __u8 *hash_location = NULL;
1295         struct tcp_md5sig_key *hash_expected;
1296         const struct iphdr *iph = ip_hdr(skb);
1297         const struct tcphdr *th = tcp_hdr(skb);
1298         int genhash;
1299         unsigned char newhash[16];
1300
1301         hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1302                                           AF_INET);
1303         hash_location = tcp_parse_md5sig_option(th);
1304
1305         /* We've parsed the options - do we have a hash? */
1306         if (!hash_expected && !hash_location)
1307                 return false;
1308
1309         if (hash_expected && !hash_location) {
1310                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1311                 return true;
1312         }
1313
1314         if (!hash_expected && hash_location) {
1315                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1316                 return true;
1317         }
1318
1319         /* Okay, so this is hash_expected and hash_location -
1320          * so we need to calculate the checksum.
1321          */
1322         genhash = tcp_v4_md5_hash_skb(newhash,
1323                                       hash_expected,
1324                                       NULL, skb);
1325
1326         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1327                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1328                 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1329                                      &iph->saddr, ntohs(th->source),
1330                                      &iph->daddr, ntohs(th->dest),
1331                                      genhash ? " tcp_v4_calc_md5_hash failed"
1332                                      : "");
1333                 return true;
1334         }
1335         return false;
1336 #endif
1337         return false;
1338 }
1339
1340 static void tcp_v4_init_req(struct request_sock *req,
1341                             const struct sock *sk_listener,
1342                             struct sk_buff *skb)
1343 {
1344         struct inet_request_sock *ireq = inet_rsk(req);
1345         struct net *net = sock_net(sk_listener);
1346
1347         sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1348         sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1349         RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1350 }
1351
1352 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1353                                           struct flowi *fl,
1354                                           const struct request_sock *req)
1355 {
1356         return inet_csk_route_req(sk, &fl->u.ip4, req);
1357 }
1358
1359 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1360         .family         =       PF_INET,
1361         .obj_size       =       sizeof(struct tcp_request_sock),
1362         .rtx_syn_ack    =       tcp_rtx_synack,
1363         .send_ack       =       tcp_v4_reqsk_send_ack,
1364         .destructor     =       tcp_v4_reqsk_destructor,
1365         .send_reset     =       tcp_v4_send_reset,
1366         .syn_ack_timeout =      tcp_syn_ack_timeout,
1367 };
1368
1369 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1370         .mss_clamp      =       TCP_MSS_DEFAULT,
1371 #ifdef CONFIG_TCP_MD5SIG
1372         .req_md5_lookup =       tcp_v4_md5_lookup,
1373         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1374 #endif
1375         .init_req       =       tcp_v4_init_req,
1376 #ifdef CONFIG_SYN_COOKIES
1377         .cookie_init_seq =      cookie_v4_init_sequence,
1378 #endif
1379         .route_req      =       tcp_v4_route_req,
1380         .init_seq       =       tcp_v4_init_seq,
1381         .init_ts_off    =       tcp_v4_init_ts_off,
1382         .send_synack    =       tcp_v4_send_synack,
1383 };
1384
1385 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1386 {
1387         /* Never answer to SYNs send to broadcast or multicast */
1388         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1389                 goto drop;
1390
1391         return tcp_conn_request(&tcp_request_sock_ops,
1392                                 &tcp_request_sock_ipv4_ops, sk, skb);
1393
1394 drop:
1395         tcp_listendrop(sk);
1396         return 0;
1397 }
1398 EXPORT_SYMBOL(tcp_v4_conn_request);
1399
1400
1401 /*
1402  * The three way handshake has completed - we got a valid synack -
1403  * now create the new socket.
1404  */
1405 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1406                                   struct request_sock *req,
1407                                   struct dst_entry *dst,
1408                                   struct request_sock *req_unhash,
1409                                   bool *own_req)
1410 {
1411         struct inet_request_sock *ireq;
1412         struct inet_sock *newinet;
1413         struct tcp_sock *newtp;
1414         struct sock *newsk;
1415 #ifdef CONFIG_TCP_MD5SIG
1416         struct tcp_md5sig_key *key;
1417 #endif
1418         struct ip_options_rcu *inet_opt;
1419
1420         if (sk_acceptq_is_full(sk))
1421                 goto exit_overflow;
1422
1423         newsk = tcp_create_openreq_child(sk, req, skb);
1424         if (!newsk)
1425                 goto exit_nonewsk;
1426
1427         newsk->sk_gso_type = SKB_GSO_TCPV4;
1428         inet_sk_rx_dst_set(newsk, skb);
1429
1430         newtp                 = tcp_sk(newsk);
1431         newinet               = inet_sk(newsk);
1432         ireq                  = inet_rsk(req);
1433         sk_daddr_set(newsk, ireq->ir_rmt_addr);
1434         sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1435         newsk->sk_bound_dev_if = ireq->ir_iif;
1436         newinet->inet_saddr   = ireq->ir_loc_addr;
1437         inet_opt              = rcu_dereference(ireq->ireq_opt);
1438         RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1439         newinet->mc_index     = inet_iif(skb);
1440         newinet->mc_ttl       = ip_hdr(skb)->ttl;
1441         newinet->rcv_tos      = ip_hdr(skb)->tos;
1442         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1443         if (inet_opt)
1444                 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1445         newinet->inet_id = newtp->write_seq ^ jiffies;
1446
1447         if (!dst) {
1448                 dst = inet_csk_route_child_sock(sk, newsk, req);
1449                 if (!dst)
1450                         goto put_and_exit;
1451         } else {
1452                 /* syncookie case : see end of cookie_v4_check() */
1453         }
1454         sk_setup_caps(newsk, dst);
1455
1456         tcp_ca_openreq_child(newsk, dst);
1457
1458         tcp_sync_mss(newsk, dst_mtu(dst));
1459         newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1460
1461         tcp_initialize_rcv_mss(newsk);
1462
1463 #ifdef CONFIG_TCP_MD5SIG
1464         /* Copy over the MD5 key from the original socket */
1465         key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1466                                 AF_INET);
1467         if (key) {
1468                 /*
1469                  * We're using one, so create a matching key
1470                  * on the newsk structure. If we fail to get
1471                  * memory, then we end up not copying the key
1472                  * across. Shucks.
1473                  */
1474                 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1475                                AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
1476                 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1477         }
1478 #endif
1479
1480         if (__inet_inherit_port(sk, newsk) < 0)
1481                 goto put_and_exit;
1482         *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1483         if (likely(*own_req)) {
1484                 tcp_move_syn(newtp, req);
1485                 ireq->ireq_opt = NULL;
1486         } else {
1487                 newinet->inet_opt = NULL;
1488         }
1489         return newsk;
1490
1491 exit_overflow:
1492         NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1493 exit_nonewsk:
1494         dst_release(dst);
1495 exit:
1496         tcp_listendrop(sk);
1497         return NULL;
1498 put_and_exit:
1499         newinet->inet_opt = NULL;
1500         inet_csk_prepare_forced_close(newsk);
1501         tcp_done(newsk);
1502         goto exit;
1503 }
1504 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1505
1506 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1507 {
1508 #ifdef CONFIG_SYN_COOKIES
1509         const struct tcphdr *th = tcp_hdr(skb);
1510
1511         if (!th->syn)
1512                 sk = cookie_v4_check(sk, skb);
1513 #endif
1514         return sk;
1515 }
1516
1517 /* The socket must have it's spinlock held when we get
1518  * here, unless it is a TCP_LISTEN socket.
1519  *
1520  * We have a potential double-lock case here, so even when
1521  * doing backlog processing we use the BH locking scheme.
1522  * This is because we cannot sleep with the original spinlock
1523  * held.
1524  */
1525 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1526 {
1527         struct sock *rsk;
1528
1529         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1530                 struct dst_entry *dst = sk->sk_rx_dst;
1531
1532                 sock_rps_save_rxhash(sk, skb);
1533                 sk_mark_napi_id(sk, skb);
1534                 if (dst) {
1535                         if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1536                             !dst->ops->check(dst, 0)) {
1537                                 dst_release(dst);
1538                                 sk->sk_rx_dst = NULL;
1539                         }
1540                 }
1541                 tcp_rcv_established(sk, skb);
1542                 return 0;
1543         }
1544
1545         if (tcp_checksum_complete(skb))
1546                 goto csum_err;
1547
1548         if (sk->sk_state == TCP_LISTEN) {
1549                 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1550
1551                 if (!nsk)
1552                         goto discard;
1553                 if (nsk != sk) {
1554                         if (tcp_child_process(sk, nsk, skb)) {
1555                                 rsk = nsk;
1556                                 goto reset;
1557                         }
1558                         return 0;
1559                 }
1560         } else
1561                 sock_rps_save_rxhash(sk, skb);
1562
1563         if (tcp_rcv_state_process(sk, skb)) {
1564                 rsk = sk;
1565                 goto reset;
1566         }
1567         return 0;
1568
1569 reset:
1570         tcp_v4_send_reset(rsk, skb);
1571 discard:
1572         kfree_skb(skb);
1573         /* Be careful here. If this function gets more complicated and
1574          * gcc suffers from register pressure on the x86, sk (in %ebx)
1575          * might be destroyed here. This current version compiles correctly,
1576          * but you have been warned.
1577          */
1578         return 0;
1579
1580 csum_err:
1581         TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1582         TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1583         goto discard;
1584 }
1585 EXPORT_SYMBOL(tcp_v4_do_rcv);
1586
1587 int tcp_v4_early_demux(struct sk_buff *skb)
1588 {
1589         const struct iphdr *iph;
1590         const struct tcphdr *th;
1591         struct sock *sk;
1592
1593         if (skb->pkt_type != PACKET_HOST)
1594                 return 0;
1595
1596         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1597                 return 0;
1598
1599         iph = ip_hdr(skb);
1600         th = tcp_hdr(skb);
1601
1602         if (th->doff < sizeof(struct tcphdr) / 4)
1603                 return 0;
1604
1605         sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1606                                        iph->saddr, th->source,
1607                                        iph->daddr, ntohs(th->dest),
1608                                        skb->skb_iif, inet_sdif(skb));
1609         if (sk) {
1610                 skb->sk = sk;
1611                 skb->destructor = sock_edemux;
1612                 if (sk_fullsock(sk)) {
1613                         struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1614
1615                         if (dst)
1616                                 dst = dst_check(dst, 0);
1617                         if (dst &&
1618                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1619                                 skb_dst_set_noref(skb, dst);
1620                 }
1621         }
1622         return 0;
1623 }
1624
1625 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1626 {
1627         u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1628         struct skb_shared_info *shinfo;
1629         const struct tcphdr *th;
1630         struct tcphdr *thtail;
1631         struct sk_buff *tail;
1632         unsigned int hdrlen;
1633         bool fragstolen;
1634         u32 gso_segs;
1635         int delta;
1636
1637         /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1638          * we can fix skb->truesize to its real value to avoid future drops.
1639          * This is valid because skb is not yet charged to the socket.
1640          * It has been noticed pure SACK packets were sometimes dropped
1641          * (if cooked by drivers without copybreak feature).
1642          */
1643         skb_condense(skb);
1644
1645         skb_dst_drop(skb);
1646
1647         if (unlikely(tcp_checksum_complete(skb))) {
1648                 bh_unlock_sock(sk);
1649                 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1650                 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1651                 return true;
1652         }
1653
1654         /* Attempt coalescing to last skb in backlog, even if we are
1655          * above the limits.
1656          * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1657          */
1658         th = (const struct tcphdr *)skb->data;
1659         hdrlen = th->doff * 4;
1660         shinfo = skb_shinfo(skb);
1661
1662         if (!shinfo->gso_size)
1663                 shinfo->gso_size = skb->len - hdrlen;
1664
1665         if (!shinfo->gso_segs)
1666                 shinfo->gso_segs = 1;
1667
1668         tail = sk->sk_backlog.tail;
1669         if (!tail)
1670                 goto no_coalesce;
1671         thtail = (struct tcphdr *)tail->data;
1672
1673         if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1674             TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1675             ((TCP_SKB_CB(tail)->tcp_flags |
1676               TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1677             !((TCP_SKB_CB(tail)->tcp_flags &
1678               TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
1679             ((TCP_SKB_CB(tail)->tcp_flags ^
1680               TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1681 #ifdef CONFIG_TLS_DEVICE
1682             tail->decrypted != skb->decrypted ||
1683 #endif
1684             thtail->doff != th->doff ||
1685             memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1686                 goto no_coalesce;
1687
1688         __skb_pull(skb, hdrlen);
1689         if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1690                 thtail->window = th->window;
1691
1692                 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1693
1694                 if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
1695                         TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1696
1697                 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1698                  * thtail->fin, so that the fast path in tcp_rcv_established()
1699                  * is not entered if we append a packet with a FIN.
1700                  * SYN, RST, URG are not present.
1701                  * ACK is set on both packets.
1702                  * PSH : we do not really care in TCP stack,
1703                  *       at least for 'GRO' packets.
1704                  */
1705                 thtail->fin |= th->fin;
1706                 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1707
1708                 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1709                         TCP_SKB_CB(tail)->has_rxtstamp = true;
1710                         tail->tstamp = skb->tstamp;
1711                         skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1712                 }
1713
1714                 /* Not as strict as GRO. We only need to carry mss max value */
1715                 skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
1716                                                  skb_shinfo(tail)->gso_size);
1717
1718                 gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
1719                 skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
1720
1721                 sk->sk_backlog.len += delta;
1722                 __NET_INC_STATS(sock_net(sk),
1723                                 LINUX_MIB_TCPBACKLOGCOALESCE);
1724                 kfree_skb_partial(skb, fragstolen);
1725                 return false;
1726         }
1727         __skb_push(skb, hdrlen);
1728
1729 no_coalesce:
1730         /* Only socket owner can try to collapse/prune rx queues
1731          * to reduce memory overhead, so add a little headroom here.
1732          * Few sockets backlog are possibly concurrently non empty.
1733          */
1734         limit += 64*1024;
1735
1736         if (unlikely(sk_add_backlog(sk, skb, limit))) {
1737                 bh_unlock_sock(sk);
1738                 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1739                 return true;
1740         }
1741         return false;
1742 }
1743 EXPORT_SYMBOL(tcp_add_backlog);
1744
1745 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1746 {
1747         struct tcphdr *th = (struct tcphdr *)skb->data;
1748
1749         return sk_filter_trim_cap(sk, skb, th->doff * 4);
1750 }
1751 EXPORT_SYMBOL(tcp_filter);
1752
1753 static void tcp_v4_restore_cb(struct sk_buff *skb)
1754 {
1755         memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1756                 sizeof(struct inet_skb_parm));
1757 }
1758
1759 static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1760                            const struct tcphdr *th)
1761 {
1762         /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1763          * barrier() makes sure compiler wont play fool^Waliasing games.
1764          */
1765         memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1766                 sizeof(struct inet_skb_parm));
1767         barrier();
1768
1769         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1770         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1771                                     skb->len - th->doff * 4);
1772         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1773         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1774         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1775         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1776         TCP_SKB_CB(skb)->sacked  = 0;
1777         TCP_SKB_CB(skb)->has_rxtstamp =
1778                         skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1779 }
1780
1781 /*
1782  *      From tcp_input.c
1783  */
1784
1785 int tcp_v4_rcv(struct sk_buff *skb)
1786 {
1787         struct net *net = dev_net(skb->dev);
1788         struct sk_buff *skb_to_free;
1789         int sdif = inet_sdif(skb);
1790         const struct iphdr *iph;
1791         const struct tcphdr *th;
1792         bool refcounted;
1793         struct sock *sk;
1794         int ret;
1795
1796         if (skb->pkt_type != PACKET_HOST)
1797                 goto discard_it;
1798
1799         /* Count it even if it's bad */
1800         __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1801
1802         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1803                 goto discard_it;
1804
1805         th = (const struct tcphdr *)skb->data;
1806
1807         if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1808                 goto bad_packet;
1809         if (!pskb_may_pull(skb, th->doff * 4))
1810                 goto discard_it;
1811
1812         /* An explanation is required here, I think.
1813          * Packet length and doff are validated by header prediction,
1814          * provided case of th->doff==0 is eliminated.
1815          * So, we defer the checks. */
1816
1817         if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1818                 goto csum_error;
1819
1820         th = (const struct tcphdr *)skb->data;
1821         iph = ip_hdr(skb);
1822 lookup:
1823         sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1824                                th->dest, sdif, &refcounted);
1825         if (!sk)
1826                 goto no_tcp_socket;
1827
1828 process:
1829         if (sk->sk_state == TCP_TIME_WAIT)
1830                 goto do_time_wait;
1831
1832         if (sk->sk_state == TCP_NEW_SYN_RECV) {
1833                 struct request_sock *req = inet_reqsk(sk);
1834                 bool req_stolen = false;
1835                 struct sock *nsk;
1836
1837                 sk = req->rsk_listener;
1838                 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1839                         sk_drops_add(sk, skb);
1840                         reqsk_put(req);
1841                         goto discard_it;
1842                 }
1843                 if (tcp_checksum_complete(skb)) {
1844                         reqsk_put(req);
1845                         goto csum_error;
1846                 }
1847                 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1848                         inet_csk_reqsk_queue_drop_and_put(sk, req);
1849                         goto lookup;
1850                 }
1851                 /* We own a reference on the listener, increase it again
1852                  * as we might lose it too soon.
1853                  */
1854                 sock_hold(sk);
1855                 refcounted = true;
1856                 nsk = NULL;
1857                 if (!tcp_filter(sk, skb)) {
1858                         th = (const struct tcphdr *)skb->data;
1859                         iph = ip_hdr(skb);
1860                         tcp_v4_fill_cb(skb, iph, th);
1861                         nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1862                 }
1863                 if (!nsk) {
1864                         reqsk_put(req);
1865                         if (req_stolen) {
1866                                 /* Another cpu got exclusive access to req
1867                                  * and created a full blown socket.
1868                                  * Try to feed this packet to this socket
1869                                  * instead of discarding it.
1870                                  */
1871                                 tcp_v4_restore_cb(skb);
1872                                 sock_put(sk);
1873                                 goto lookup;
1874                         }
1875                         goto discard_and_relse;
1876                 }
1877                 if (nsk == sk) {
1878                         reqsk_put(req);
1879                         tcp_v4_restore_cb(skb);
1880                 } else if (tcp_child_process(sk, nsk, skb)) {
1881                         tcp_v4_send_reset(nsk, skb);
1882                         goto discard_and_relse;
1883                 } else {
1884                         sock_put(sk);
1885                         return 0;
1886                 }
1887         }
1888         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1889                 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1890                 goto discard_and_relse;
1891         }
1892
1893         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1894                 goto discard_and_relse;
1895
1896         if (tcp_v4_inbound_md5_hash(sk, skb))
1897                 goto discard_and_relse;
1898
1899         nf_reset(skb);
1900
1901         if (tcp_filter(sk, skb))
1902                 goto discard_and_relse;
1903         th = (const struct tcphdr *)skb->data;
1904         iph = ip_hdr(skb);
1905         tcp_v4_fill_cb(skb, iph, th);
1906
1907         skb->dev = NULL;
1908
1909         if (sk->sk_state == TCP_LISTEN) {
1910                 ret = tcp_v4_do_rcv(sk, skb);
1911                 goto put_and_return;
1912         }
1913
1914         sk_incoming_cpu_update(sk);
1915
1916         bh_lock_sock_nested(sk);
1917         tcp_segs_in(tcp_sk(sk), skb);
1918         ret = 0;
1919         if (!sock_owned_by_user(sk)) {
1920                 skb_to_free = sk->sk_rx_skb_cache;
1921                 sk->sk_rx_skb_cache = NULL;
1922                 ret = tcp_v4_do_rcv(sk, skb);
1923         } else {
1924                 if (tcp_add_backlog(sk, skb))
1925                         goto discard_and_relse;
1926                 skb_to_free = NULL;
1927         }
1928         bh_unlock_sock(sk);
1929         if (skb_to_free)
1930                 __kfree_skb(skb_to_free);
1931
1932 put_and_return:
1933         if (refcounted)
1934                 sock_put(sk);
1935
1936         return ret;
1937
1938 no_tcp_socket:
1939         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1940                 goto discard_it;
1941
1942         tcp_v4_fill_cb(skb, iph, th);
1943
1944         if (tcp_checksum_complete(skb)) {
1945 csum_error:
1946                 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1947 bad_packet:
1948                 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1949         } else {
1950                 tcp_v4_send_reset(NULL, skb);
1951         }
1952
1953 discard_it:
1954         /* Discard frame. */
1955         kfree_skb(skb);
1956         return 0;
1957
1958 discard_and_relse:
1959         sk_drops_add(sk, skb);
1960         if (refcounted)
1961                 sock_put(sk);
1962         goto discard_it;
1963
1964 do_time_wait:
1965         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1966                 inet_twsk_put(inet_twsk(sk));
1967                 goto discard_it;
1968         }
1969
1970         tcp_v4_fill_cb(skb, iph, th);
1971
1972         if (tcp_checksum_complete(skb)) {
1973                 inet_twsk_put(inet_twsk(sk));
1974                 goto csum_error;
1975         }
1976         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1977         case TCP_TW_SYN: {
1978                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1979                                                         &tcp_hashinfo, skb,
1980                                                         __tcp_hdrlen(th),
1981                                                         iph->saddr, th->source,
1982                                                         iph->daddr, th->dest,
1983                                                         inet_iif(skb),
1984                                                         sdif);
1985                 if (sk2) {
1986                         inet_twsk_deschedule_put(inet_twsk(sk));
1987                         sk = sk2;
1988                         tcp_v4_restore_cb(skb);
1989                         refcounted = false;
1990                         goto process;
1991                 }
1992         }
1993                 /* to ACK */
1994                 /* fall through */
1995         case TCP_TW_ACK:
1996                 tcp_v4_timewait_ack(sk, skb);
1997                 break;
1998         case TCP_TW_RST:
1999                 tcp_v4_send_reset(sk, skb);
2000                 inet_twsk_deschedule_put(inet_twsk(sk));
2001                 goto discard_it;
2002         case TCP_TW_SUCCESS:;
2003         }
2004         goto discard_it;
2005 }
2006
2007 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2008         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
2009         .twsk_unique    = tcp_twsk_unique,
2010         .twsk_destructor= tcp_twsk_destructor,
2011 };
2012
2013 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2014 {
2015         struct dst_entry *dst = skb_dst(skb);
2016
2017         if (dst && dst_hold_safe(dst)) {
2018                 sk->sk_rx_dst = dst;
2019                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2020         }
2021 }
2022 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2023
2024 const struct inet_connection_sock_af_ops ipv4_specific = {
2025         .queue_xmit        = ip_queue_xmit,
2026         .send_check        = tcp_v4_send_check,
2027         .rebuild_header    = inet_sk_rebuild_header,
2028         .sk_rx_dst_set     = inet_sk_rx_dst_set,
2029         .conn_request      = tcp_v4_conn_request,
2030         .syn_recv_sock     = tcp_v4_syn_recv_sock,
2031         .net_header_len    = sizeof(struct iphdr),
2032         .setsockopt        = ip_setsockopt,
2033         .getsockopt        = ip_getsockopt,
2034         .addr2sockaddr     = inet_csk_addr2sockaddr,
2035         .sockaddr_len      = sizeof(struct sockaddr_in),
2036 #ifdef CONFIG_COMPAT
2037         .compat_setsockopt = compat_ip_setsockopt,
2038         .compat_getsockopt = compat_ip_getsockopt,
2039 #endif
2040         .mtu_reduced       = tcp_v4_mtu_reduced,
2041 };
2042 EXPORT_SYMBOL(ipv4_specific);
2043
2044 #ifdef CONFIG_TCP_MD5SIG
2045 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2046         .md5_lookup             = tcp_v4_md5_lookup,
2047         .calc_md5_hash          = tcp_v4_md5_hash_skb,
2048         .md5_parse              = tcp_v4_parse_md5_keys,
2049 };
2050 #endif
2051
2052 /* NOTE: A lot of things set to zero explicitly by call to
2053  *       sk_alloc() so need not be done here.
2054  */
2055 static int tcp_v4_init_sock(struct sock *sk)
2056 {
2057         struct inet_connection_sock *icsk = inet_csk(sk);
2058
2059         tcp_init_sock(sk);
2060
2061         icsk->icsk_af_ops = &ipv4_specific;
2062
2063 #ifdef CONFIG_TCP_MD5SIG
2064         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2065 #endif
2066
2067         return 0;
2068 }
2069
2070 void tcp_v4_destroy_sock(struct sock *sk)
2071 {
2072         struct tcp_sock *tp = tcp_sk(sk);
2073
2074         trace_tcp_destroy_sock(sk);
2075
2076         tcp_clear_xmit_timers(sk);
2077
2078         tcp_cleanup_congestion_control(sk);
2079
2080         tcp_cleanup_ulp(sk);
2081
2082         /* Cleanup up the write buffer. */
2083         tcp_write_queue_purge(sk);
2084
2085         /* Check if we want to disable active TFO */
2086         tcp_fastopen_active_disable_ofo_check(sk);
2087
2088         /* Cleans up our, hopefully empty, out_of_order_queue. */
2089         skb_rbtree_purge(&tp->out_of_order_queue);
2090
2091 #ifdef CONFIG_TCP_MD5SIG
2092         /* Clean up the MD5 key list, if any */
2093         if (tp->md5sig_info) {
2094                 tcp_clear_md5_list(sk);
2095                 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
2096                 tp->md5sig_info = NULL;
2097         }
2098 #endif
2099
2100         /* Clean up a referenced TCP bind bucket. */
2101         if (inet_csk(sk)->icsk_bind_hash)
2102                 inet_put_port(sk);
2103
2104         BUG_ON(tp->fastopen_rsk);
2105
2106         /* If socket is aborted during connect operation */
2107         tcp_free_fastopen_req(tp);
2108         tcp_fastopen_destroy_cipher(sk);
2109         tcp_saved_syn_free(tp);
2110
2111         sk_sockets_allocated_dec(sk);
2112 }
2113 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2114
2115 #ifdef CONFIG_PROC_FS
2116 /* Proc filesystem TCP sock list dumping. */
2117
2118 /*
2119  * Get next listener socket follow cur.  If cur is NULL, get first socket
2120  * starting from bucket given in st->bucket; when st->bucket is zero the
2121  * very first socket in the hash table is returned.
2122  */
2123 static void *listening_get_next(struct seq_file *seq, void *cur)
2124 {
2125         struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2126         struct tcp_iter_state *st = seq->private;
2127         struct net *net = seq_file_net(seq);
2128         struct inet_listen_hashbucket *ilb;
2129         struct sock *sk = cur;
2130
2131         if (!sk) {
2132 get_head:
2133                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2134                 spin_lock(&ilb->lock);
2135                 sk = sk_head(&ilb->head);
2136                 st->offset = 0;
2137                 goto get_sk;
2138         }
2139         ilb = &tcp_hashinfo.listening_hash[st->bucket];
2140         ++st->num;
2141         ++st->offset;
2142
2143         sk = sk_next(sk);
2144 get_sk:
2145         sk_for_each_from(sk) {
2146                 if (!net_eq(sock_net(sk), net))
2147                         continue;
2148                 if (sk->sk_family == afinfo->family)
2149                         return sk;
2150         }
2151         spin_unlock(&ilb->lock);
2152         st->offset = 0;
2153         if (++st->bucket < INET_LHTABLE_SIZE)
2154                 goto get_head;
2155         return NULL;
2156 }
2157
2158 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2159 {
2160         struct tcp_iter_state *st = seq->private;
2161         void *rc;
2162
2163         st->bucket = 0;
2164         st->offset = 0;
2165         rc = listening_get_next(seq, NULL);
2166
2167         while (rc && *pos) {
2168                 rc = listening_get_next(seq, rc);
2169                 --*pos;
2170         }
2171         return rc;
2172 }
2173
2174 static inline bool empty_bucket(const struct tcp_iter_state *st)
2175 {
2176         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2177 }
2178
2179 /*
2180  * Get first established socket starting from bucket given in st->bucket.
2181  * If st->bucket is zero, the very first socket in the hash is returned.
2182  */
2183 static void *established_get_first(struct seq_file *seq)
2184 {
2185         struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2186         struct tcp_iter_state *st = seq->private;
2187         struct net *net = seq_file_net(seq);
2188         void *rc = NULL;
2189
2190         st->offset = 0;
2191         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2192                 struct sock *sk;
2193                 struct hlist_nulls_node *node;
2194                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2195
2196                 /* Lockless fast path for the common case of empty buckets */
2197                 if (empty_bucket(st))
2198                         continue;
2199
2200                 spin_lock_bh(lock);
2201                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2202                         if (sk->sk_family != afinfo->family ||
2203                             !net_eq(sock_net(sk), net)) {
2204                                 continue;
2205                         }
2206                         rc = sk;
2207                         goto out;
2208                 }
2209                 spin_unlock_bh(lock);
2210         }
2211 out:
2212         return rc;
2213 }
2214
2215 static void *established_get_next(struct seq_file *seq, void *cur)
2216 {
2217         struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2218         struct sock *sk = cur;
2219         struct hlist_nulls_node *node;
2220         struct tcp_iter_state *st = seq->private;
2221         struct net *net = seq_file_net(seq);
2222
2223         ++st->num;
2224         ++st->offset;
2225
2226         sk = sk_nulls_next(sk);
2227
2228         sk_nulls_for_each_from(sk, node) {
2229                 if (sk->sk_family == afinfo->family &&
2230                     net_eq(sock_net(sk), net))
2231                         return sk;
2232         }
2233
2234         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2235         ++st->bucket;
2236         return established_get_first(seq);
2237 }
2238
2239 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2240 {
2241         struct tcp_iter_state *st = seq->private;
2242         void *rc;
2243
2244         st->bucket = 0;
2245         rc = established_get_first(seq);
2246
2247         while (rc && pos) {
2248                 rc = established_get_next(seq, rc);
2249                 --pos;
2250         }
2251         return rc;
2252 }
2253
2254 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2255 {
2256         void *rc;
2257         struct tcp_iter_state *st = seq->private;
2258
2259         st->state = TCP_SEQ_STATE_LISTENING;
2260         rc        = listening_get_idx(seq, &pos);
2261
2262         if (!rc) {
2263                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2264                 rc        = established_get_idx(seq, pos);
2265         }
2266
2267         return rc;
2268 }
2269
2270 static void *tcp_seek_last_pos(struct seq_file *seq)
2271 {
2272         struct tcp_iter_state *st = seq->private;
2273         int offset = st->offset;
2274         int orig_num = st->num;
2275         void *rc = NULL;
2276
2277         switch (st->state) {
2278         case TCP_SEQ_STATE_LISTENING:
2279                 if (st->bucket >= INET_LHTABLE_SIZE)
2280                         break;
2281                 st->state = TCP_SEQ_STATE_LISTENING;
2282                 rc = listening_get_next(seq, NULL);
2283                 while (offset-- && rc)
2284                         rc = listening_get_next(seq, rc);
2285                 if (rc)
2286                         break;
2287                 st->bucket = 0;
2288                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2289                 /* Fallthrough */
2290         case TCP_SEQ_STATE_ESTABLISHED:
2291                 if (st->bucket > tcp_hashinfo.ehash_mask)
2292                         break;
2293                 rc = established_get_first(seq);
2294                 while (offset-- && rc)
2295                         rc = established_get_next(seq, rc);
2296         }
2297
2298         st->num = orig_num;
2299
2300         return rc;
2301 }
2302
2303 void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2304 {
2305         struct tcp_iter_state *st = seq->private;
2306         void *rc;
2307
2308         if (*pos && *pos == st->last_pos) {
2309                 rc = tcp_seek_last_pos(seq);
2310                 if (rc)
2311                         goto out;
2312         }
2313
2314         st->state = TCP_SEQ_STATE_LISTENING;
2315         st->num = 0;
2316         st->bucket = 0;
2317         st->offset = 0;
2318         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2319
2320 out:
2321         st->last_pos = *pos;
2322         return rc;
2323 }
2324 EXPORT_SYMBOL(tcp_seq_start);
2325
2326 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2327 {
2328         struct tcp_iter_state *st = seq->private;
2329         void *rc = NULL;
2330
2331         if (v == SEQ_START_TOKEN) {
2332                 rc = tcp_get_idx(seq, 0);
2333                 goto out;
2334         }
2335
2336         switch (st->state) {
2337         case TCP_SEQ_STATE_LISTENING:
2338                 rc = listening_get_next(seq, v);
2339                 if (!rc) {
2340                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2341                         st->bucket = 0;
2342                         st->offset = 0;
2343                         rc        = established_get_first(seq);
2344                 }
2345                 break;
2346         case TCP_SEQ_STATE_ESTABLISHED:
2347                 rc = established_get_next(seq, v);
2348                 break;
2349         }
2350 out:
2351         ++*pos;
2352         st->last_pos = *pos;
2353         return rc;
2354 }
2355 EXPORT_SYMBOL(tcp_seq_next);
2356
2357 void tcp_seq_stop(struct seq_file *seq, void *v)
2358 {
2359         struct tcp_iter_state *st = seq->private;
2360
2361         switch (st->state) {
2362         case TCP_SEQ_STATE_LISTENING:
2363                 if (v != SEQ_START_TOKEN)
2364                         spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2365                 break;
2366         case TCP_SEQ_STATE_ESTABLISHED:
2367                 if (v)
2368                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2369                 break;
2370         }
2371 }
2372 EXPORT_SYMBOL(tcp_seq_stop);
2373
2374 static void get_openreq4(const struct request_sock *req,
2375                          struct seq_file *f, int i)
2376 {
2377         const struct inet_request_sock *ireq = inet_rsk(req);
2378         long delta = req->rsk_timer.expires - jiffies;
2379
2380         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2381                 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2382                 i,
2383                 ireq->ir_loc_addr,
2384                 ireq->ir_num,
2385                 ireq->ir_rmt_addr,
2386                 ntohs(ireq->ir_rmt_port),
2387                 TCP_SYN_RECV,
2388                 0, 0, /* could print option size, but that is af dependent. */
2389                 1,    /* timers active (only the expire timer) */
2390                 jiffies_delta_to_clock_t(delta),
2391                 req->num_timeout,
2392                 from_kuid_munged(seq_user_ns(f),
2393                                  sock_i_uid(req->rsk_listener)),
2394                 0,  /* non standard timer */
2395                 0, /* open_requests have no inode */
2396                 0,
2397                 req);
2398 }
2399
2400 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2401 {
2402         int timer_active;
2403         unsigned long timer_expires;
2404         const struct tcp_sock *tp = tcp_sk(sk);
2405         const struct inet_connection_sock *icsk = inet_csk(sk);
2406         const struct inet_sock *inet = inet_sk(sk);
2407         const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2408         __be32 dest = inet->inet_daddr;
2409         __be32 src = inet->inet_rcv_saddr;
2410         __u16 destp = ntohs(inet->inet_dport);
2411         __u16 srcp = ntohs(inet->inet_sport);
2412         int rx_queue;
2413         int state;
2414
2415         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2416             icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2417             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2418                 timer_active    = 1;
2419                 timer_expires   = icsk->icsk_timeout;
2420         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2421                 timer_active    = 4;
2422                 timer_expires   = icsk->icsk_timeout;
2423         } else if (timer_pending(&sk->sk_timer)) {
2424                 timer_active    = 2;
2425                 timer_expires   = sk->sk_timer.expires;
2426         } else {
2427                 timer_active    = 0;
2428                 timer_expires = jiffies;
2429         }
2430
2431         state = inet_sk_state_load(sk);
2432         if (state == TCP_LISTEN)
2433                 rx_queue = sk->sk_ack_backlog;
2434         else
2435                 /* Because we don't lock the socket,
2436                  * we might find a transient negative value.
2437                  */
2438                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2439
2440         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2441                         "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2442                 i, src, srcp, dest, destp, state,
2443                 tp->write_seq - tp->snd_una,
2444                 rx_queue,
2445                 timer_active,
2446                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2447                 icsk->icsk_retransmits,
2448                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2449                 icsk->icsk_probes_out,
2450                 sock_i_ino(sk),
2451                 refcount_read(&sk->sk_refcnt), sk,
2452                 jiffies_to_clock_t(icsk->icsk_rto),
2453                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2454                 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
2455                 tp->snd_cwnd,
2456                 state == TCP_LISTEN ?
2457                     fastopenq->max_qlen :
2458                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2459 }
2460
2461 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2462                                struct seq_file *f, int i)
2463 {
2464         long delta = tw->tw_timer.expires - jiffies;
2465         __be32 dest, src;
2466         __u16 destp, srcp;
2467
2468         dest  = tw->tw_daddr;
2469         src   = tw->tw_rcv_saddr;
2470         destp = ntohs(tw->tw_dport);
2471         srcp  = ntohs(tw->tw_sport);
2472
2473         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2474                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2475                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2476                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2477                 refcount_read(&tw->tw_refcnt), tw);
2478 }
2479
2480 #define TMPSZ 150
2481
2482 static int tcp4_seq_show(struct seq_file *seq, void *v)
2483 {
2484         struct tcp_iter_state *st;
2485         struct sock *sk = v;
2486
2487         seq_setwidth(seq, TMPSZ - 1);
2488         if (v == SEQ_START_TOKEN) {
2489                 seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2490                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2491                            "inode");
2492                 goto out;
2493         }
2494         st = seq->private;
2495
2496         if (sk->sk_state == TCP_TIME_WAIT)
2497                 get_timewait4_sock(v, seq, st->num);
2498         else if (sk->sk_state == TCP_NEW_SYN_RECV)
2499                 get_openreq4(v, seq, st->num);
2500         else
2501                 get_tcp4_sock(v, seq, st->num);
2502 out:
2503         seq_pad(seq, '\n');
2504         return 0;
2505 }
2506
2507 static const struct seq_operations tcp4_seq_ops = {
2508         .show           = tcp4_seq_show,
2509         .start          = tcp_seq_start,
2510         .next           = tcp_seq_next,
2511         .stop           = tcp_seq_stop,
2512 };
2513
2514 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2515         .family         = AF_INET,
2516 };
2517
2518 static int __net_init tcp4_proc_init_net(struct net *net)
2519 {
2520         if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
2521                         sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
2522                 return -ENOMEM;
2523         return 0;
2524 }
2525
2526 static void __net_exit tcp4_proc_exit_net(struct net *net)
2527 {
2528         remove_proc_entry("tcp", net->proc_net);
2529 }
2530
2531 static struct pernet_operations tcp4_net_ops = {
2532         .init = tcp4_proc_init_net,
2533         .exit = tcp4_proc_exit_net,
2534 };
2535
2536 int __init tcp4_proc_init(void)
2537 {
2538         return register_pernet_subsys(&tcp4_net_ops);
2539 }
2540
2541 void tcp4_proc_exit(void)
2542 {
2543         unregister_pernet_subsys(&tcp4_net_ops);
2544 }
2545 #endif /* CONFIG_PROC_FS */
2546
2547 struct proto tcp_prot = {
2548         .name                   = "TCP",
2549         .owner                  = THIS_MODULE,
2550         .close                  = tcp_close,
2551         .pre_connect            = tcp_v4_pre_connect,
2552         .connect                = tcp_v4_connect,
2553         .disconnect             = tcp_disconnect,
2554         .accept                 = inet_csk_accept,
2555         .ioctl                  = tcp_ioctl,
2556         .init                   = tcp_v4_init_sock,
2557         .destroy                = tcp_v4_destroy_sock,
2558         .shutdown               = tcp_shutdown,
2559         .setsockopt             = tcp_setsockopt,
2560         .getsockopt             = tcp_getsockopt,
2561         .keepalive              = tcp_set_keepalive,
2562         .recvmsg                = tcp_recvmsg,
2563         .sendmsg                = tcp_sendmsg,
2564         .sendpage               = tcp_sendpage,
2565         .backlog_rcv            = tcp_v4_do_rcv,
2566         .release_cb             = tcp_release_cb,
2567         .hash                   = inet_hash,
2568         .unhash                 = inet_unhash,
2569         .get_port               = inet_csk_get_port,
2570         .enter_memory_pressure  = tcp_enter_memory_pressure,
2571         .leave_memory_pressure  = tcp_leave_memory_pressure,
2572         .stream_memory_free     = tcp_stream_memory_free,
2573         .sockets_allocated      = &tcp_sockets_allocated,
2574         .orphan_count           = &tcp_orphan_count,
2575         .memory_allocated       = &tcp_memory_allocated,
2576         .memory_pressure        = &tcp_memory_pressure,
2577         .sysctl_mem             = sysctl_tcp_mem,
2578         .sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_wmem),
2579         .sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_rmem),
2580         .max_header             = MAX_TCP_HEADER,
2581         .obj_size               = sizeof(struct tcp_sock),
2582         .slab_flags             = SLAB_TYPESAFE_BY_RCU,
2583         .twsk_prot              = &tcp_timewait_sock_ops,
2584         .rsk_prot               = &tcp_request_sock_ops,
2585         .h.hashinfo             = &tcp_hashinfo,
2586         .no_autobind            = true,
2587 #ifdef CONFIG_COMPAT
2588         .compat_setsockopt      = compat_tcp_setsockopt,
2589         .compat_getsockopt      = compat_tcp_getsockopt,
2590 #endif
2591         .diag_destroy           = tcp_abort,
2592 };
2593 EXPORT_SYMBOL(tcp_prot);
2594
2595 static void __net_exit tcp_sk_exit(struct net *net)
2596 {
2597         int cpu;
2598
2599         if (net->ipv4.tcp_congestion_control)
2600                 module_put(net->ipv4.tcp_congestion_control->owner);
2601
2602         for_each_possible_cpu(cpu)
2603                 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2604         free_percpu(net->ipv4.tcp_sk);
2605 }
2606
2607 static int __net_init tcp_sk_init(struct net *net)
2608 {
2609         int res, cpu, cnt;
2610
2611         net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2612         if (!net->ipv4.tcp_sk)
2613                 return -ENOMEM;
2614
2615         for_each_possible_cpu(cpu) {
2616                 struct sock *sk;
2617
2618                 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2619                                            IPPROTO_TCP, net);
2620                 if (res)
2621                         goto fail;
2622                 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2623
2624                 /* Please enforce IP_DF and IPID==0 for RST and
2625                  * ACK sent in SYN-RECV and TIME-WAIT state.
2626                  */
2627                 inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
2628
2629                 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2630         }
2631
2632         net->ipv4.sysctl_tcp_ecn = 2;
2633         net->ipv4.sysctl_tcp_ecn_fallback = 1;
2634
2635         net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2636         net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2637         net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2638
2639         net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2640         net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2641         net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2642
2643         net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2644         net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2645         net->ipv4.sysctl_tcp_syncookies = 1;
2646         net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2647         net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2648         net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2649         net->ipv4.sysctl_tcp_orphan_retries = 0;
2650         net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2651         net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2652         net->ipv4.sysctl_tcp_tw_reuse = 2;
2653
2654         cnt = tcp_hashinfo.ehash_mask + 1;
2655         net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
2656         net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2657
2658         net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
2659         net->ipv4.sysctl_tcp_sack = 1;
2660         net->ipv4.sysctl_tcp_window_scaling = 1;
2661         net->ipv4.sysctl_tcp_timestamps = 1;
2662         net->ipv4.sysctl_tcp_early_retrans = 3;
2663         net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
2664         net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
2665         net->ipv4.sysctl_tcp_retrans_collapse = 1;
2666         net->ipv4.sysctl_tcp_max_reordering = 300;
2667         net->ipv4.sysctl_tcp_dsack = 1;
2668         net->ipv4.sysctl_tcp_app_win = 31;
2669         net->ipv4.sysctl_tcp_adv_win_scale = 1;
2670         net->ipv4.sysctl_tcp_frto = 2;
2671         net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
2672         /* This limits the percentage of the congestion window which we
2673          * will allow a single TSO frame to consume.  Building TSO frames
2674          * which are too large can cause TCP streams to be bursty.
2675          */
2676         net->ipv4.sysctl_tcp_tso_win_divisor = 3;
2677         /* Default TSQ limit of 16 TSO segments */
2678         net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
2679         /* rfc5961 challenge ack rate limiting */
2680         net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
2681         net->ipv4.sysctl_tcp_min_tso_segs = 2;
2682         net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
2683         net->ipv4.sysctl_tcp_autocorking = 1;
2684         net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
2685         net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
2686         net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
2687         if (net != &init_net) {
2688                 memcpy(net->ipv4.sysctl_tcp_rmem,
2689                        init_net.ipv4.sysctl_tcp_rmem,
2690                        sizeof(init_net.ipv4.sysctl_tcp_rmem));
2691                 memcpy(net->ipv4.sysctl_tcp_wmem,
2692                        init_net.ipv4.sysctl_tcp_wmem,
2693                        sizeof(init_net.ipv4.sysctl_tcp_wmem));
2694         }
2695         net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
2696         net->ipv4.sysctl_tcp_comp_sack_nr = 44;
2697         net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
2698         spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
2699         net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
2700         atomic_set(&net->ipv4.tfo_active_disable_times, 0);
2701
2702         /* Reno is always built in */
2703         if (!net_eq(net, &init_net) &&
2704             try_module_get(init_net.ipv4.tcp_congestion_control->owner))
2705                 net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
2706         else
2707                 net->ipv4.tcp_congestion_control = &tcp_reno;
2708
2709         return 0;
2710 fail:
2711         tcp_sk_exit(net);
2712
2713         return res;
2714 }
2715
2716 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2717 {
2718         struct net *net;
2719
2720         inet_twsk_purge(&tcp_hashinfo, AF_INET);
2721
2722         list_for_each_entry(net, net_exit_list, exit_list)
2723                 tcp_fastopen_ctx_destroy(net);
2724 }
2725
2726 static struct pernet_operations __net_initdata tcp_sk_ops = {
2727        .init       = tcp_sk_init,
2728        .exit       = tcp_sk_exit,
2729        .exit_batch = tcp_sk_exit_batch,
2730 };
2731
2732 void __init tcp_v4_init(void)
2733 {
2734         if (register_pernet_subsys(&tcp_sk_ops))
2735                 panic("Failed to create the TCP control socket.\n");
2736 }