3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
14 * Hideaki YOSHIFUJI : sin6_scope_id support
 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
 * Alexey Kuznetsov:             allows both IPv4 and IPv6 sockets to bind
 *                               a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/busy_poll.h>
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
69 #include <crypto/hash.h>
70 #include <linux/scatterlist.h>
72 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
74 struct request_sock *req);
76 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78 static const struct inet_connection_sock_af_ops ipv6_mapped;
79 static const struct inet_connection_sock_af_ops ipv6_specific;
80 #ifdef CONFIG_TCP_MD5SIG
81 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
85 const struct in6_addr *addr)
91 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93 struct dst_entry *dst = skb_dst(skb);
95 if (dst && dst_hold_safe(dst)) {
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
104 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
106 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
109 tcp_hdr(skb)->source);
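/* Note: secure_tcpv6_sequence_number() is expected to derive the ISN from a
 * keyed hash over the connection 4-tuple plus a clock component (in the
 * spirit of RFC 6528), so initial sequence numbers are hard to predict while
 * still advancing monotonically for a given tuple.
 */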
112 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
115 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
116 struct inet_sock *inet = inet_sk(sk);
117 struct inet_connection_sock *icsk = inet_csk(sk);
118 struct ipv6_pinfo *np = inet6_sk(sk);
119 struct tcp_sock *tp = tcp_sk(sk);
120 struct in6_addr *saddr = NULL, *final_p, final;
121 struct ipv6_txoptions *opt;
123 struct dst_entry *dst;
127 if (addr_len < SIN6_LEN_RFC2133)
130 if (usin->sin6_family != AF_INET6)
131 return -EAFNOSUPPORT;
133 memset(&fl6, 0, sizeof(fl6));
136 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
137 IP6_ECN_flow_init(fl6.flowlabel);
138 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
139 struct ip6_flowlabel *flowlabel;
140 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
143 fl6_sock_release(flowlabel);
148 * connect() to INADDR_ANY means loopback (BSD'ism).
151 if (ipv6_addr_any(&usin->sin6_addr))
152 usin->sin6_addr.s6_addr[15] = 0x1;
154 addr_type = ipv6_addr_type(&usin->sin6_addr);
156 if (addr_type & IPV6_ADDR_MULTICAST)
159 if (addr_type&IPV6_ADDR_LINKLOCAL) {
160 if (addr_len >= sizeof(struct sockaddr_in6) &&
161 usin->sin6_scope_id) {
/* If interface is set while binding, indices
 * must coincide.
 */
165 if (sk->sk_bound_dev_if &&
166 sk->sk_bound_dev_if != usin->sin6_scope_id)
169 sk->sk_bound_dev_if = usin->sin6_scope_id;
/* Connecting to a link-local address requires an interface. */
173 if (!sk->sk_bound_dev_if)
177 if (tp->rx_opt.ts_recent_stamp &&
178 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
179 tp->rx_opt.ts_recent = 0;
180 tp->rx_opt.ts_recent_stamp = 0;
184 sk->sk_v6_daddr = usin->sin6_addr;
185 np->flow_label = fl6.flowlabel;
191 if (addr_type == IPV6_ADDR_MAPPED) {
192 u32 exthdrlen = icsk->icsk_ext_hdr_len;
193 struct sockaddr_in sin;
195 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
197 if (__ipv6_only_sock(sk))
200 sin.sin_family = AF_INET;
201 sin.sin_port = usin->sin6_port;
202 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
204 icsk->icsk_af_ops = &ipv6_mapped;
205 sk->sk_backlog_rcv = tcp_v4_do_rcv;
206 #ifdef CONFIG_TCP_MD5SIG
207 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
210 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
213 icsk->icsk_ext_hdr_len = exthdrlen;
214 icsk->icsk_af_ops = &ipv6_specific;
215 sk->sk_backlog_rcv = tcp_v6_do_rcv;
216 #ifdef CONFIG_TCP_MD5SIG
217 tp->af_specific = &tcp_sock_ipv6_specific;
221 np->saddr = sk->sk_v6_rcv_saddr;
226 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
227 saddr = &sk->sk_v6_rcv_saddr;
229 fl6.flowi6_proto = IPPROTO_TCP;
230 fl6.daddr = sk->sk_v6_daddr;
231 fl6.saddr = saddr ? *saddr : np->saddr;
232 fl6.flowi6_oif = sk->sk_bound_dev_if;
233 fl6.flowi6_mark = sk->sk_mark;
234 fl6.fl6_dport = usin->sin6_port;
235 fl6.fl6_sport = inet->inet_sport;
237 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
238 final_p = fl6_update_dst(&fl6, opt, &final);
240 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
242 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
250 sk->sk_v6_rcv_saddr = *saddr;
253 /* set the source address */
255 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
257 sk->sk_gso_type = SKB_GSO_TCPV6;
258 ip6_dst_store(sk, dst, NULL, NULL);
260 if (tcp_death_row.sysctl_tw_recycle &&
261 !tp->rx_opt.ts_recent_stamp &&
262 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
263 tcp_fetch_timewait_stamp(sk, dst);
265 icsk->icsk_ext_hdr_len = 0;
267 icsk->icsk_ext_hdr_len = opt->opt_flen +
270 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
272 inet->inet_dport = usin->sin6_port;
274 tcp_set_state(sk, TCP_SYN_SENT);
275 err = inet6_hash_connect(&tcp_death_row, sk);
281 if (!tp->write_seq && likely(!tp->repair))
282 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
283 sk->sk_v6_daddr.s6_addr32,
287 err = tcp_connect(sk);
294 tcp_set_state(sk, TCP_CLOSE);
297 inet->inet_dport = 0;
298 sk->sk_route_caps = 0;
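/* Illustration (not kernel code): the IPv4-mapped branch above is what an
 * AF_INET6 socket exercises when userspace connects it to an address of the
 * form ::ffff:a.b.c.d. A minimal sketch, assuming a reachable IPv4 peer at
 * 192.0.2.1:80:
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
 *				    .sin6_port = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &dst.sin6_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * Unless IPV6_V6ONLY is set on the socket, tcp_v6_connect() rewrites this
 * into a plain tcp_v4_connect() using the embedded IPv4 address.
 */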
302 static void tcp_v6_mtu_reduced(struct sock *sk)
304 struct dst_entry *dst;
306 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
309 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
313 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
314 tcp_sync_mss(sk, dst_mtu(dst));
315 tcp_simple_retransmit(sk);
319 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
320 u8 type, u8 code, int offset, __be32 info)
322 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
323 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
324 struct net *net = dev_net(skb->dev);
325 struct request_sock *fastopen;
326 struct ipv6_pinfo *np;
333 sk = __inet6_lookup_established(net, &tcp_hashinfo,
334 &hdr->daddr, th->dest,
335 &hdr->saddr, ntohs(th->source),
339 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
344 if (sk->sk_state == TCP_TIME_WAIT) {
345 inet_twsk_put(inet_twsk(sk));
348 seq = ntohl(th->seq);
349 fatal = icmpv6_err_convert(type, code, &err);
350 if (sk->sk_state == TCP_NEW_SYN_RECV)
351 return tcp_req_err(sk, seq, fatal);
354 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
355 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
357 if (sk->sk_state == TCP_CLOSE)
360 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
361 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
367 fastopen = tp->fastopen_rsk;
368 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
369 if (sk->sk_state != TCP_LISTEN &&
370 !between(seq, snd_una, tp->snd_nxt)) {
371 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
377 if (type == NDISC_REDIRECT) {
378 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
381 dst->ops->redirect(dst, sk, skb);
385 if (type == ICMPV6_PKT_TOOBIG) {
/* We are not interested in TCP_LISTEN and open_requests
 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
 * they should go through unfragmented).
 */
390 if (sk->sk_state == TCP_LISTEN)
393 if (!ip6_sk_accept_pmtu(sk))
396 tp->mtu_info = ntohl(info);
397 if (!sock_owned_by_user(sk))
398 tcp_v6_mtu_reduced(sk);
399 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
/* Might be for a request_sock */
407 switch (sk->sk_state) {
/* Only in fast or simultaneous open. If a fast open socket
 * is already accepted, it is treated as a connected one below.
 */
413 if (fastopen && !fastopen->sk)
416 if (!sock_owned_by_user(sk)) {
418 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
422 sk->sk_err_soft = err;
426 if (!sock_owned_by_user(sk) && np->recverr) {
428 sk->sk_error_report(sk);
430 sk->sk_err_soft = err;
438 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
440 struct request_sock *req,
441 struct tcp_fastopen_cookie *foc,
442 enum tcp_synack_type synack_type)
444 struct inet_request_sock *ireq = inet_rsk(req);
445 struct ipv6_pinfo *np = inet6_sk(sk);
446 struct ipv6_txoptions *opt;
447 struct flowi6 *fl6 = &fl->u.ip6;
451 /* First, grab a route. */
452 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
453 IPPROTO_TCP)) == NULL)
456 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
459 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
460 &ireq->ir_v6_rmt_addr);
462 fl6->daddr = ireq->ir_v6_rmt_addr;
463 if (np->repflow && ireq->pktopts)
464 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
467 opt = ireq->ipv6_opt;
469 opt = rcu_dereference(np->opt);
470 err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
472 err = net_xmit_eval(err);
480 static void tcp_v6_reqsk_destructor(struct request_sock *req)
482 kfree(inet_rsk(req)->ipv6_opt);
483 kfree_skb(inet_rsk(req)->pktopts);
486 #ifdef CONFIG_TCP_MD5SIG
487 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
488 const struct in6_addr *addr)
490 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
493 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
494 const struct sock *addr_sk)
496 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
499 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
502 struct tcp_md5sig cmd;
503 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
505 if (optlen < sizeof(cmd))
508 if (copy_from_user(&cmd, optval, sizeof(cmd)))
511 if (sin6->sin6_family != AF_INET6)
514 if (!cmd.tcpm_keylen) {
515 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
516 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
518 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
522 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
525 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
526 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
527 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
529 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
530 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
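/* Illustration (not kernel code): tcp_v6_parse_md5_keys() is reached from
 * userspace via setsockopt(TCP_MD5SIG). A minimal sketch, assuming "fd" is a
 * TCP socket and "peer" is a populated struct sockaddr_in6:
 *
 *	struct tcp_md5sig md5 = { 0 };
 *
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
 *	md5.tcpm_keylen = strlen("secret");
 *	memcpy(md5.tcpm_key, "secret", md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing tcpm_keylen == 0 instead deletes the key, mirroring the branch
 * above.
 */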
533 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
534 const struct in6_addr *daddr,
535 const struct in6_addr *saddr, int nbytes)
537 struct tcp6_pseudohdr *bp;
538 struct scatterlist sg;
540 bp = &hp->md5_blk.ip6;
541 /* 1. TCP pseudo-header (RFC2460) */
544 bp->protocol = cpu_to_be32(IPPROTO_TCP);
545 bp->len = cpu_to_be32(nbytes);
547 sg_init_one(&sg, bp, sizeof(*bp));
548 ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
549 return crypto_ahash_update(hp->md5_req);
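/* For reference, the md5_blk.ip6 block hashed above follows the RFC 2460
 * TCP pseudo-header layout; the pool is assumed to provide a tcp6_pseudohdr
 * along these lines:
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr saddr;
 *		struct in6_addr daddr;
 *		__be32 len;
 *		__be32 protocol;	(upper bits are padding)
 *	};
 */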
552 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
553 const struct in6_addr *daddr, struct in6_addr *saddr,
554 const struct tcphdr *th)
556 struct tcp_md5sig_pool *hp;
557 struct ahash_request *req;
559 hp = tcp_get_md5sig_pool();
561 goto clear_hash_noput;
564 if (crypto_ahash_init(req))
566 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
568 if (tcp_md5_hash_header(hp, th))
570 if (tcp_md5_hash_key(hp, key))
572 ahash_request_set_crypt(req, NULL, md5_hash, 0);
573 if (crypto_ahash_final(req))
576 tcp_put_md5sig_pool();
580 tcp_put_md5sig_pool();
582 memset(md5_hash, 0, 16);
586 static int tcp_v6_md5_hash_skb(char *md5_hash,
587 const struct tcp_md5sig_key *key,
588 const struct sock *sk,
589 const struct sk_buff *skb)
591 const struct in6_addr *saddr, *daddr;
592 struct tcp_md5sig_pool *hp;
593 struct ahash_request *req;
594 const struct tcphdr *th = tcp_hdr(skb);
596 if (sk) { /* valid for establish/request sockets */
597 saddr = &sk->sk_v6_rcv_saddr;
598 daddr = &sk->sk_v6_daddr;
600 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
601 saddr = &ip6h->saddr;
602 daddr = &ip6h->daddr;
605 hp = tcp_get_md5sig_pool();
607 goto clear_hash_noput;
610 if (crypto_ahash_init(req))
613 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
615 if (tcp_md5_hash_header(hp, th))
617 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
619 if (tcp_md5_hash_key(hp, key))
621 ahash_request_set_crypt(req, NULL, md5_hash, 0);
622 if (crypto_ahash_final(req))
625 tcp_put_md5sig_pool();
629 tcp_put_md5sig_pool();
631 memset(md5_hash, 0, 16);
637 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
638 const struct sk_buff *skb)
640 #ifdef CONFIG_TCP_MD5SIG
641 const __u8 *hash_location = NULL;
642 struct tcp_md5sig_key *hash_expected;
643 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
644 const struct tcphdr *th = tcp_hdr(skb);
648 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
649 hash_location = tcp_parse_md5sig_option(th);
651 /* We've parsed the options - do we have a hash? */
652 if (!hash_expected && !hash_location)
655 if (hash_expected && !hash_location) {
656 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
660 if (!hash_expected && hash_location) {
661 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
665 /* check the signature */
666 genhash = tcp_v6_md5_hash_skb(newhash,
670 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
671 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
672 genhash ? "failed" : "mismatch",
673 &ip6h->saddr, ntohs(th->source),
674 &ip6h->daddr, ntohs(th->dest));
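/* Summary of the checks above: a segment is accepted when it carries no MD5
 * option and no key is configured for the peer, or when both are present and
 * the computed digest matches; a key without an option, an option without a
 * key, or a digest mismatch all cause the segment to be dropped.
 */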
681 static void tcp_v6_init_req(struct request_sock *req,
682 const struct sock *sk_listener,
685 struct inet_request_sock *ireq = inet_rsk(req);
686 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
688 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
689 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
691 /* So that link locals have meaning */
692 if (!sk_listener->sk_bound_dev_if &&
693 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
694 ireq->ir_iif = tcp_v6_iif(skb);
696 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
697 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
698 np->rxopt.bits.rxinfo ||
699 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
700 np->rxopt.bits.rxohlim || np->repflow)) {
701 atomic_inc(&skb->users);
706 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
708 const struct request_sock *req,
713 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
716 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
718 .obj_size = sizeof(struct tcp6_request_sock),
719 .rtx_syn_ack = tcp_rtx_synack,
720 .send_ack = tcp_v6_reqsk_send_ack,
721 .destructor = tcp_v6_reqsk_destructor,
722 .send_reset = tcp_v6_send_reset,
723 .syn_ack_timeout = tcp_syn_ack_timeout,
726 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
727 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
728 sizeof(struct ipv6hdr),
729 #ifdef CONFIG_TCP_MD5SIG
730 .req_md5_lookup = tcp_v6_md5_lookup,
731 .calc_md5_hash = tcp_v6_md5_hash_skb,
733 .init_req = tcp_v6_init_req,
734 #ifdef CONFIG_SYN_COOKIES
735 .cookie_init_seq = cookie_v6_init_sequence,
737 .route_req = tcp_v6_route_req,
738 .init_seq = tcp_v6_init_sequence,
739 .send_synack = tcp_v6_send_synack,
742 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
743 u32 ack, u32 win, u32 tsval, u32 tsecr,
744 int oif, struct tcp_md5sig_key *key, int rst,
745 u8 tclass, u32 label)
747 const struct tcphdr *th = tcp_hdr(skb);
749 struct sk_buff *buff;
751 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
752 struct sock *ctl_sk = net->ipv6.tcp_sk;
753 unsigned int tot_len = sizeof(struct tcphdr);
754 struct dst_entry *dst;
758 tot_len += TCPOLEN_TSTAMP_ALIGNED;
759 #ifdef CONFIG_TCP_MD5SIG
761 tot_len += TCPOLEN_MD5SIG_ALIGNED;
764 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
769 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
771 t1 = (struct tcphdr *) skb_push(buff, tot_len);
772 skb_reset_transport_header(buff);
774 /* Swap the send and the receive. */
775 memset(t1, 0, sizeof(*t1));
776 t1->dest = th->source;
777 t1->source = th->dest;
778 t1->doff = tot_len / 4;
779 t1->seq = htonl(seq);
780 t1->ack_seq = htonl(ack);
781 t1->ack = !rst || !th->ack;
783 t1->window = htons(win);
785 topt = (__be32 *)(t1 + 1);
788 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
789 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
790 *topt++ = htonl(tsval);
791 *topt++ = htonl(tsecr);
794 #ifdef CONFIG_TCP_MD5SIG
796 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
797 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
798 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
799 &ipv6_hdr(skb)->saddr,
800 &ipv6_hdr(skb)->daddr, t1);
804 memset(&fl6, 0, sizeof(fl6));
805 fl6.daddr = ipv6_hdr(skb)->saddr;
806 fl6.saddr = ipv6_hdr(skb)->daddr;
807 fl6.flowlabel = label;
809 buff->ip_summed = CHECKSUM_PARTIAL;
812 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
814 fl6.flowi6_proto = IPPROTO_TCP;
815 if (rt6_need_strict(&fl6.daddr) && !oif)
816 fl6.flowi6_oif = tcp_v6_iif(skb);
818 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
821 fl6.flowi6_oif = oif;
824 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
825 fl6.fl6_dport = t1->dest;
826 fl6.fl6_sport = t1->source;
827 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
/* Pass a socket to ip6_dst_lookup_flow even when this is for a RST;
 * the underlying function will use it to retrieve the network
 * namespace.
 */
833 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
835 skb_dst_set(buff, dst);
836 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
837 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
839 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
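/* tcp_v6_send_response() is shared by the RST and ACK reply paths below: it
 * builds the reply by swapping the addresses and ports of the incoming
 * segment, optionally appending timestamp and MD5 options, and transmits it
 * on the per-netns control socket.
 */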
846 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
848 const struct tcphdr *th = tcp_hdr(skb);
849 u32 seq = 0, ack_seq = 0;
850 struct tcp_md5sig_key *key = NULL;
851 #ifdef CONFIG_TCP_MD5SIG
852 const __u8 *hash_location = NULL;
853 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
854 unsigned char newhash[16];
856 struct sock *sk1 = NULL;
/* If sk is not NULL, it means we did a successful lookup and the incoming
 * route had to be correct. prequeue might have dropped our dst.
 */
866 if (!sk && !ipv6_unicast_destination(skb))
869 #ifdef CONFIG_TCP_MD5SIG
871 hash_location = tcp_parse_md5sig_option(th);
872 if (sk && sk_fullsock(sk)) {
873 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
874 } else if (hash_location) {
/* The active side's socket is lost. Try to find the listening socket
 * through the source port, and then find the MD5 key through that
 * listening socket. We do not lose security here: the incoming packet
 * is checked against the MD5 hash of the key we find, and no RST is
 * generated if the hash doesn't match.
 */
882 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
883 &tcp_hashinfo, NULL, 0,
885 th->source, &ipv6h->daddr,
886 ntohs(th->source), tcp_v6_iif(skb));
890 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
894 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
895 if (genhash || memcmp(hash_location, newhash, 16) != 0)
901 seq = ntohl(th->ack_seq);
903 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
906 oif = sk ? sk->sk_bound_dev_if : 0;
907 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
909 #ifdef CONFIG_TCP_MD5SIG
915 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
916 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
917 struct tcp_md5sig_key *key, u8 tclass,
920 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
924 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
926 struct inet_timewait_sock *tw = inet_twsk(sk);
927 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
929 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
930 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
931 tcp_time_stamp + tcptw->tw_ts_offset,
932 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
933 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
938 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
939 struct request_sock *req)
941 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
942 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
944 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
945 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
946 tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
947 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
948 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
953 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
955 #ifdef CONFIG_SYN_COOKIES
956 const struct tcphdr *th = tcp_hdr(skb);
959 sk = cookie_v6_check(sk, skb);
964 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
966 if (skb->protocol == htons(ETH_P_IP))
967 return tcp_v4_conn_request(sk, skb);
969 if (!ipv6_unicast_destination(skb))
972 return tcp_conn_request(&tcp6_request_sock_ops,
973 &tcp_request_sock_ipv6_ops, sk, skb);
977 return 0; /* don't send reset */
980 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
981 struct request_sock *req,
982 struct dst_entry *dst,
983 struct request_sock *req_unhash,
986 struct inet_request_sock *ireq;
987 struct ipv6_pinfo *newnp;
988 const struct ipv6_pinfo *np = inet6_sk(sk);
989 struct ipv6_txoptions *opt;
990 struct tcp6_sock *newtcp6sk;
991 struct inet_sock *newinet;
992 struct tcp_sock *newtp;
994 #ifdef CONFIG_TCP_MD5SIG
995 struct tcp_md5sig_key *key;
999 if (skb->protocol == htons(ETH_P_IP)) {
1004 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1005 req_unhash, own_req);
1010 newtcp6sk = (struct tcp6_sock *)newsk;
1011 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1013 newinet = inet_sk(newsk);
1014 newnp = inet6_sk(newsk);
1015 newtp = tcp_sk(newsk);
1017 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1019 newnp->saddr = newsk->sk_v6_rcv_saddr;
1021 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1022 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1023 #ifdef CONFIG_TCP_MD5SIG
1024 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1027 newnp->ipv6_ac_list = NULL;
1028 newnp->ipv6_fl_list = NULL;
1029 newnp->pktoptions = NULL;
1031 newnp->mcast_oif = tcp_v6_iif(skb);
1032 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1033 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1035 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1038 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1039 * here, tcp_create_openreq_child now does this for us, see the comment in
1040 * that function for the gory details. -acme
/* This is a tricky place. Until this moment the IPv4 tcp code
   worked with the IPv6 icsk.icsk_af_ops.
   Sync it now.
 */
1047 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1052 ireq = inet_rsk(req);
1054 if (sk_acceptq_is_full(sk))
1058 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1063 newsk = tcp_create_openreq_child(sk, req, skb);
1068 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1069 * count here, tcp_create_openreq_child now does this for us, see the
1070 * comment in that function for the gory details. -acme
1073 newsk->sk_gso_type = SKB_GSO_TCPV6;
1074 ip6_dst_store(newsk, dst, NULL, NULL);
1075 inet6_sk_rx_dst_set(newsk, skb);
1077 newtcp6sk = (struct tcp6_sock *)newsk;
1078 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1080 newtp = tcp_sk(newsk);
1081 newinet = inet_sk(newsk);
1082 newnp = inet6_sk(newsk);
1084 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1086 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1087 newnp->saddr = ireq->ir_v6_loc_addr;
1088 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1089 newsk->sk_bound_dev_if = ireq->ir_iif;
1091 /* Now IPv6 options...
1093 First: no IPv4 options.
1095 newinet->inet_opt = NULL;
1096 newnp->ipv6_ac_list = NULL;
1097 newnp->ipv6_fl_list = NULL;
1100 newnp->rxopt.all = np->rxopt.all;
1102 newnp->pktoptions = NULL;
1104 newnp->mcast_oif = tcp_v6_iif(skb);
1105 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1106 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1108 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
/* Clone native IPv6 options from the listening socket (if any).

   Yes, keeping a reference count would be much more clever,
   but we do one more thing here: reattach optmem to newsk.
 */
1116 opt = ireq->ipv6_opt;
1118 opt = rcu_dereference(np->opt);
1120 opt = ipv6_dup_options(newsk, opt);
1121 RCU_INIT_POINTER(newnp->opt, opt);
1123 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1125 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1128 tcp_ca_openreq_child(newsk, dst);
1130 tcp_sync_mss(newsk, dst_mtu(dst));
1131 newtp->advmss = dst_metric_advmss(dst);
1132 if (tcp_sk(sk)->rx_opt.user_mss &&
1133 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1134 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1136 tcp_initialize_rcv_mss(newsk);
1138 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1139 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1141 #ifdef CONFIG_TCP_MD5SIG
1142 /* Copy over the MD5 key from the original socket */
1143 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
/* We're using one, so create a matching key
 * on the newsk structure. If we fail to get
 * memory, then we end up not copying the key
 * across. Shucks.
 */
1150 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1151 AF_INET6, key->key, key->keylen,
1152 sk_gfp_mask(sk, GFP_ATOMIC));
1156 if (__inet_inherit_port(sk, newsk) < 0) {
1157 inet_csk_prepare_forced_close(newsk);
1161 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1163 tcp_move_syn(newtp, req);
1165 /* Clone pktoptions received with SYN, if we own the req */
1166 if (ireq->pktopts) {
1167 newnp->pktoptions = skb_clone(ireq->pktopts,
1168 sk_gfp_mask(sk, GFP_ATOMIC));
1169 consume_skb(ireq->pktopts);
1170 ireq->pktopts = NULL;
1171 if (newnp->pktoptions)
1172 skb_set_owner_r(newnp->pktoptions, newsk);
1179 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1195 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1197 struct ipv6_pinfo *np = inet6_sk(sk);
1198 struct tcp_sock *tp;
1199 struct sk_buff *opt_skb = NULL;
/* Imagine: the socket is IPv6. An IPv4 packet arrives,
   goes to the IPv4 receive handler and is backlogged.
   From the backlog it always goes here. Kerboom...
   Fortunately, tcp_rcv_established and rcv_established
   handle them correctly, but it is not the case with
   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
 */
1209 if (skb->protocol == htons(ETH_P_IP))
1210 return tcp_v4_do_rcv(sk, skb);
1212 if (sk_filter(sk, skb))
1216 * socket locking is here for SMP purposes as backlog rcv
1217 * is currently called with bh processing disabled.
/* Do Stevens' IPV6_PKTOPTIONS.

   Yes, guys, it is the only place in our code where we
   can do this without affecting IPv4.
   The rest of the code is protocol independent,
   and I do not like the idea of uglifying IPv4.

   Actually, the whole idea behind IPV6_PKTOPTIONS
   does not look very well thought out. For now we latch
   the options received in the last packet enqueued
   by tcp. Feel free to propose a better solution.
 */
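/* The options latched into np->pktoptions below are what a process asked for
 * with the IPV6_RECV* socket options and later reads back through the old
 * Stevens-style IPV6_PKTOPTIONS getsockopt.
 */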
1234 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1236 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1237 struct dst_entry *dst = sk->sk_rx_dst;
1239 sock_rps_save_rxhash(sk, skb);
1240 sk_mark_napi_id(sk, skb);
1242 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1243 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1245 sk->sk_rx_dst = NULL;
1249 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1251 goto ipv6_pktoptions;
1255 if (tcp_checksum_complete(skb))
1258 if (sk->sk_state == TCP_LISTEN) {
1259 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1265 sock_rps_save_rxhash(nsk, skb);
1266 sk_mark_napi_id(nsk, skb);
1267 if (tcp_child_process(sk, nsk, skb))
1270 __kfree_skb(opt_skb);
1274 sock_rps_save_rxhash(sk, skb);
1276 if (tcp_rcv_state_process(sk, skb))
1279 goto ipv6_pktoptions;
1283 tcp_v6_send_reset(sk, skb);
1286 __kfree_skb(opt_skb);
1290 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1291 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
/* Do you ask, what is it?

   1. The skb was enqueued by tcp.
   2. The skb was added to the tail of the read queue, rather than out of order.
   3. The socket is not in a passive state.
   4. Finally, it really contains options which the user wants to receive.
 */
1304 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1305 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1306 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1307 np->mcast_oif = tcp_v6_iif(opt_skb);
1308 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1309 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1310 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1311 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1313 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1314 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1315 skb_set_owner_r(opt_skb, sk);
1316 opt_skb = xchg(&np->pktoptions, opt_skb);
1318 __kfree_skb(opt_skb);
1319 opt_skb = xchg(&np->pktoptions, NULL);
1327 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1328 const struct tcphdr *th)
1330 /* This is tricky: we move IP6CB at its correct location into
1331 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1332 * _decode_session6() uses IP6CB().
1333 * barrier() makes sure compiler won't play aliasing games.
1335 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1336 sizeof(struct inet6_skb_parm));
1339 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1340 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1341 skb->len - th->doff*4);
1342 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1343 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1344 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1345 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1346 TCP_SKB_CB(skb)->sacked = 0;
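/* Worked example of the end_seq computation above: a bare SYN carrying no
 * payload yields end_seq = seq + 1, while a segment carrying 100 bytes of
 * data with FIN set yields end_seq = seq + 101, since SYN and FIN each
 * consume one sequence number.
 */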
1349 static void tcp_v6_restore_cb(struct sk_buff *skb)
1351 /* We need to move header back to the beginning if xfrm6_policy_check()
1352 * and tcp_v6_fill_cb() are going to be called again.
1354 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1355 sizeof(struct inet6_skb_parm));
1358 static int tcp_v6_rcv(struct sk_buff *skb)
1360 const struct tcphdr *th;
1361 const struct ipv6hdr *hdr;
1365 struct net *net = dev_net(skb->dev);
1367 if (skb->pkt_type != PACKET_HOST)
1371 * Count it even if it's bad.
1373 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1375 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1378 th = (const struct tcphdr *)skb->data;
1380 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1382 if (!pskb_may_pull(skb, th->doff*4))
1385 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1388 th = (const struct tcphdr *)skb->data;
1389 hdr = ipv6_hdr(skb);
1392 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1393 th->source, th->dest, inet6_iif(skb),
1399 if (sk->sk_state == TCP_TIME_WAIT)
1402 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1403 struct request_sock *req = inet_reqsk(sk);
1406 sk = req->rsk_listener;
1407 tcp_v6_fill_cb(skb, hdr, th);
1408 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1412 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1413 inet_csk_reqsk_queue_drop_and_put(sk, req);
1418 nsk = tcp_check_req(sk, skb, req, false);
1421 goto discard_and_relse;
1425 tcp_v6_restore_cb(skb);
1426 } else if (tcp_child_process(sk, nsk, skb)) {
1427 tcp_v6_send_reset(nsk, skb);
1428 goto discard_and_relse;
1434 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1435 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1436 goto discard_and_relse;
1439 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1440 goto discard_and_relse;
1442 tcp_v6_fill_cb(skb, hdr, th);
1444 if (tcp_v6_inbound_md5_hash(sk, skb))
1445 goto discard_and_relse;
1447 if (sk_filter(sk, skb))
1448 goto discard_and_relse;
1452 if (sk->sk_state == TCP_LISTEN) {
1453 ret = tcp_v6_do_rcv(sk, skb);
1454 goto put_and_return;
1457 sk_incoming_cpu_update(sk);
1459 bh_lock_sock_nested(sk);
1460 tcp_segs_in(tcp_sk(sk), skb);
1462 if (!sock_owned_by_user(sk)) {
1463 if (!tcp_prequeue(sk, skb))
1464 ret = tcp_v6_do_rcv(sk, skb);
1465 } else if (unlikely(sk_add_backlog(sk, skb,
1466 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1468 __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
1469 goto discard_and_relse;
1476 return ret ? -1 : 0;
1479 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1482 tcp_v6_fill_cb(skb, hdr, th);
1484 if (tcp_checksum_complete(skb)) {
1486 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1488 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1490 tcp_v6_send_reset(NULL, skb);
1498 sk_drops_add(sk, skb);
1504 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1505 inet_twsk_put(inet_twsk(sk));
1509 tcp_v6_fill_cb(skb, hdr, th);
1511 if (tcp_checksum_complete(skb)) {
1512 inet_twsk_put(inet_twsk(sk));
1516 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1521 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1522 skb, __tcp_hdrlen(th),
1523 &ipv6_hdr(skb)->saddr, th->source,
1524 &ipv6_hdr(skb)->daddr,
1525 ntohs(th->dest), tcp_v6_iif(skb));
1527 struct inet_timewait_sock *tw = inet_twsk(sk);
1528 inet_twsk_deschedule_put(tw);
1530 tcp_v6_restore_cb(skb);
1534 /* Fall through to ACK */
1537 tcp_v6_timewait_ack(sk, skb);
1540 tcp_v6_restore_cb(skb);
1541 tcp_v6_send_reset(sk, skb);
1542 inet_twsk_deschedule_put(inet_twsk(sk));
1544 case TCP_TW_SUCCESS:
1550 static void tcp_v6_early_demux(struct sk_buff *skb)
1552 const struct ipv6hdr *hdr;
1553 const struct tcphdr *th;
1556 if (skb->pkt_type != PACKET_HOST)
1559 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1562 hdr = ipv6_hdr(skb);
1565 if (th->doff < sizeof(struct tcphdr) / 4)
1568 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1569 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1570 &hdr->saddr, th->source,
1571 &hdr->daddr, ntohs(th->dest),
1575 skb->destructor = sock_edemux;
1576 if (sk_fullsock(sk)) {
1577 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1580 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1582 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1583 skb_dst_set_noref(skb, dst);
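/* Early demux runs from the IPv6 receive path before the full TCP receive:
 * when an established socket is found here, it is attached to the skb and
 * its cached rx dst is reused, so tcp_v6_rcv() can skip a second socket and
 * route lookup on the hot path.
 */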
1588 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1589 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1590 .twsk_unique = tcp_twsk_unique,
1591 .twsk_destructor = tcp_twsk_destructor,
1594 static const struct inet_connection_sock_af_ops ipv6_specific = {
1595 .queue_xmit = inet6_csk_xmit,
1596 .send_check = tcp_v6_send_check,
1597 .rebuild_header = inet6_sk_rebuild_header,
1598 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1599 .conn_request = tcp_v6_conn_request,
1600 .syn_recv_sock = tcp_v6_syn_recv_sock,
1601 .net_header_len = sizeof(struct ipv6hdr),
1602 .net_frag_header_len = sizeof(struct frag_hdr),
1603 .setsockopt = ipv6_setsockopt,
1604 .getsockopt = ipv6_getsockopt,
1605 .addr2sockaddr = inet6_csk_addr2sockaddr,
1606 .sockaddr_len = sizeof(struct sockaddr_in6),
1607 .bind_conflict = inet6_csk_bind_conflict,
1608 #ifdef CONFIG_COMPAT
1609 .compat_setsockopt = compat_ipv6_setsockopt,
1610 .compat_getsockopt = compat_ipv6_getsockopt,
1612 .mtu_reduced = tcp_v6_mtu_reduced,
1615 #ifdef CONFIG_TCP_MD5SIG
1616 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1617 .md5_lookup = tcp_v6_md5_lookup,
1618 .calc_md5_hash = tcp_v6_md5_hash_skb,
1619 .md5_parse = tcp_v6_parse_md5_keys,
1624 * TCP over IPv4 via INET6 API
1626 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1627 .queue_xmit = ip_queue_xmit,
1628 .send_check = tcp_v4_send_check,
1629 .rebuild_header = inet_sk_rebuild_header,
1630 .sk_rx_dst_set = inet_sk_rx_dst_set,
1631 .conn_request = tcp_v6_conn_request,
1632 .syn_recv_sock = tcp_v6_syn_recv_sock,
1633 .net_header_len = sizeof(struct iphdr),
1634 .setsockopt = ipv6_setsockopt,
1635 .getsockopt = ipv6_getsockopt,
1636 .addr2sockaddr = inet6_csk_addr2sockaddr,
1637 .sockaddr_len = sizeof(struct sockaddr_in6),
1638 .bind_conflict = inet6_csk_bind_conflict,
1639 #ifdef CONFIG_COMPAT
1640 .compat_setsockopt = compat_ipv6_setsockopt,
1641 .compat_getsockopt = compat_ipv6_getsockopt,
1643 .mtu_reduced = tcp_v4_mtu_reduced,
1646 #ifdef CONFIG_TCP_MD5SIG
1647 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1648 .md5_lookup = tcp_v4_md5_lookup,
1649 .calc_md5_hash = tcp_v4_md5_hash_skb,
1650 .md5_parse = tcp_v6_parse_md5_keys,
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
1657 static int tcp_v6_init_sock(struct sock *sk)
1659 struct inet_connection_sock *icsk = inet_csk(sk);
1663 icsk->icsk_af_ops = &ipv6_specific;
1665 #ifdef CONFIG_TCP_MD5SIG
1666 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1672 static void tcp_v6_destroy_sock(struct sock *sk)
1674 tcp_v4_destroy_sock(sk);
1675 inet6_destroy_sock(sk);
1678 #ifdef CONFIG_PROC_FS
1679 /* Proc filesystem TCPv6 sock list dumping. */
1680 static void get_openreq6(struct seq_file *seq,
1681 const struct request_sock *req, int i)
1683 long ttd = req->rsk_timer.expires - jiffies;
1684 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1685 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1691 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1692 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1694 src->s6_addr32[0], src->s6_addr32[1],
1695 src->s6_addr32[2], src->s6_addr32[3],
1696 inet_rsk(req)->ir_num,
1697 dest->s6_addr32[0], dest->s6_addr32[1],
1698 dest->s6_addr32[2], dest->s6_addr32[3],
1699 ntohs(inet_rsk(req)->ir_rmt_port),
1701 0, 0, /* could print option size, but that is af dependent. */
1702 1, /* timers active (only the expire timer) */
1703 jiffies_to_clock_t(ttd),
1705 from_kuid_munged(seq_user_ns(seq),
1706 sock_i_uid(req->rsk_listener)),
1707 0, /* non standard timer */
1708 0, /* open_requests have no inode */
1712 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1714 const struct in6_addr *dest, *src;
1717 unsigned long timer_expires;
1718 const struct inet_sock *inet = inet_sk(sp);
1719 const struct tcp_sock *tp = tcp_sk(sp);
1720 const struct inet_connection_sock *icsk = inet_csk(sp);
1721 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1725 dest = &sp->sk_v6_daddr;
1726 src = &sp->sk_v6_rcv_saddr;
1727 destp = ntohs(inet->inet_dport);
1728 srcp = ntohs(inet->inet_sport);
1730 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1732 timer_expires = icsk->icsk_timeout;
1733 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1735 timer_expires = icsk->icsk_timeout;
1736 } else if (timer_pending(&sp->sk_timer)) {
1738 timer_expires = sp->sk_timer.expires;
1741 timer_expires = jiffies;
1744 state = sk_state_load(sp);
1745 if (state == TCP_LISTEN)
1746 rx_queue = sp->sk_ack_backlog;
1748 /* Because we don't lock the socket,
1749 * we might find a transient negative value.
1751 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1754 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1755 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1757 src->s6_addr32[0], src->s6_addr32[1],
1758 src->s6_addr32[2], src->s6_addr32[3], srcp,
1759 dest->s6_addr32[0], dest->s6_addr32[1],
1760 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1762 tp->write_seq - tp->snd_una,
1765 jiffies_delta_to_clock_t(timer_expires - jiffies),
1766 icsk->icsk_retransmits,
1767 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1768 icsk->icsk_probes_out,
1770 atomic_read(&sp->sk_refcnt), sp,
1771 jiffies_to_clock_t(icsk->icsk_rto),
1772 jiffies_to_clock_t(icsk->icsk_ack.ato),
1773 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1775 state == TCP_LISTEN ?
1776 fastopenq->max_qlen :
1777 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1781 static void get_timewait6_sock(struct seq_file *seq,
1782 struct inet_timewait_sock *tw, int i)
1784 long delta = tw->tw_timer.expires - jiffies;
1785 const struct in6_addr *dest, *src;
1788 dest = &tw->tw_v6_daddr;
1789 src = &tw->tw_v6_rcv_saddr;
1790 destp = ntohs(tw->tw_dport);
1791 srcp = ntohs(tw->tw_sport);
1794 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1795 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1797 src->s6_addr32[0], src->s6_addr32[1],
1798 src->s6_addr32[2], src->s6_addr32[3], srcp,
1799 dest->s6_addr32[0], dest->s6_addr32[1],
1800 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1801 tw->tw_substate, 0, 0,
1802 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1803 atomic_read(&tw->tw_refcnt), tw);
1806 static int tcp6_seq_show(struct seq_file *seq, void *v)
1808 struct tcp_iter_state *st;
1809 struct sock *sk = v;
1811 if (v == SEQ_START_TOKEN) {
1816 "st tx_queue rx_queue tr tm->when retrnsmt"
1817 " uid timeout inode\n");
1822 if (sk->sk_state == TCP_TIME_WAIT)
1823 get_timewait6_sock(seq, v, st->num);
1824 else if (sk->sk_state == TCP_NEW_SYN_RECV)
1825 get_openreq6(seq, v, st->num);
1827 get_tcp6_sock(seq, v, st->num);
1832 static const struct file_operations tcp6_afinfo_seq_fops = {
1833 .owner = THIS_MODULE,
1834 .open = tcp_seq_open,
1836 .llseek = seq_lseek,
1837 .release = seq_release_net
1840 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1843 .seq_fops = &tcp6_afinfo_seq_fops,
1845 .show = tcp6_seq_show,
1849 int __net_init tcp6_proc_init(struct net *net)
1851 return tcp_proc_register(net, &tcp6_seq_afinfo);
1854 void tcp6_proc_exit(struct net *net)
1856 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1860 static void tcp_v6_clear_sk(struct sock *sk, int size)
1862 struct inet_sock *inet = inet_sk(sk);
/* we do not want to clear the pinet6 field, because of RCU lookups */
1865 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1867 size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1868 memset(&inet->pinet6 + 1, 0, size);
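/* The clear is done in two spans around pinet6: sk_prot_clear_nulls() wipes
 * everything up to offsetof(struct inet_sock, pinet6), and the memset above
 * wipes everything after it, so the pinet6 pointer itself survives for
 * concurrent RCU lookups that may still dereference it.
 */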
1871 struct proto tcpv6_prot = {
1873 .owner = THIS_MODULE,
1875 .connect = tcp_v6_connect,
1876 .disconnect = tcp_disconnect,
1877 .accept = inet_csk_accept,
1879 .init = tcp_v6_init_sock,
1880 .destroy = tcp_v6_destroy_sock,
1881 .shutdown = tcp_shutdown,
1882 .setsockopt = tcp_setsockopt,
1883 .getsockopt = tcp_getsockopt,
1884 .recvmsg = tcp_recvmsg,
1885 .sendmsg = tcp_sendmsg,
1886 .sendpage = tcp_sendpage,
1887 .backlog_rcv = tcp_v6_do_rcv,
1888 .release_cb = tcp_release_cb,
1890 .unhash = inet_unhash,
1891 .get_port = inet_csk_get_port,
1892 .enter_memory_pressure = tcp_enter_memory_pressure,
1893 .stream_memory_free = tcp_stream_memory_free,
1894 .sockets_allocated = &tcp_sockets_allocated,
1895 .memory_allocated = &tcp_memory_allocated,
1896 .memory_pressure = &tcp_memory_pressure,
1897 .orphan_count = &tcp_orphan_count,
1898 .sysctl_mem = sysctl_tcp_mem,
1899 .sysctl_wmem = sysctl_tcp_wmem,
1900 .sysctl_rmem = sysctl_tcp_rmem,
1901 .max_header = MAX_TCP_HEADER,
1902 .obj_size = sizeof(struct tcp6_sock),
1903 .slab_flags = SLAB_DESTROY_BY_RCU,
1904 .twsk_prot = &tcp6_timewait_sock_ops,
1905 .rsk_prot = &tcp6_request_sock_ops,
1906 .h.hashinfo = &tcp_hashinfo,
1907 .no_autobind = true,
1908 #ifdef CONFIG_COMPAT
1909 .compat_setsockopt = compat_tcp_setsockopt,
1910 .compat_getsockopt = compat_tcp_getsockopt,
1912 .clear_sk = tcp_v6_clear_sk,
1913 .diag_destroy = tcp_abort,
1916 static const struct inet6_protocol tcpv6_protocol = {
1917 .early_demux = tcp_v6_early_demux,
1918 .handler = tcp_v6_rcv,
1919 .err_handler = tcp_v6_err,
1920 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1923 static struct inet_protosw tcpv6_protosw = {
1924 .type = SOCK_STREAM,
1925 .protocol = IPPROTO_TCP,
1926 .prot = &tcpv6_prot,
1927 .ops = &inet6_stream_ops,
1928 .flags = INET_PROTOSW_PERMANENT |
1932 static int __net_init tcpv6_net_init(struct net *net)
1934 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1935 SOCK_RAW, IPPROTO_TCP, net);
1938 static void __net_exit tcpv6_net_exit(struct net *net)
1940 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1943 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1945 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1948 static struct pernet_operations tcpv6_net_ops = {
1949 .init = tcpv6_net_init,
1950 .exit = tcpv6_net_exit,
1951 .exit_batch = tcpv6_net_exit_batch,
1954 int __init tcpv6_init(void)
1958 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1962 /* register inet6 protocol */
1963 ret = inet6_register_protosw(&tcpv6_protosw);
1965 goto out_tcpv6_protocol;
1967 ret = register_pernet_subsys(&tcpv6_net_ops);
1969 goto out_tcpv6_protosw;
1974 inet6_unregister_protosw(&tcpv6_protosw);
1976 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1980 void tcpv6_exit(void)
1982 unregister_pernet_subsys(&tcpv6_net_ops);
1983 inet6_unregister_protosw(&tcpv6_protosw);
1984 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);