diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5347ab2c9c58b312d3f3a969141a835a8c4b6657..e325b4506e2520ba345709074b4e485fbfac6a31 100644 (file)
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -79,6 +79,7 @@
 #include <trace/events/tcp.h>
 #include <linux/jump_label_ratelimit.h>
 #include <net/busy_poll.h>
+#include <net/mptcp.h>
 
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 
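A note on the new include: <net/mptcp.h> supplies every mptcp_*() hook used
in this patch. When CONFIG_MPTCP is not set, the header is expected to
provide static inline no-op stubs, so the plain-TCP fast path is unchanged
and the hooks below compile away. A minimal sketch of that stub shape (the
exact stub list lives in include/net/mptcp.h in this series):

    #ifndef CONFIG_MPTCP
    static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
                                              const struct sk_buff *from)
    {
            return true;    /* no MPTCP metadata: collapsing is always safe */
    }

    static inline void mptcp_incoming_options(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct tcp_options_received *opt_rx)
    {
    }
    #endif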
@@ -1423,7 +1424,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
        if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
                goto fallback;
 
-       if (!tcp_skb_can_collapse_to(prev))
+       if (!tcp_skb_can_collapse(prev, skb))
                goto fallback;
 
        in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
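The SACK shifting path used to check only that the destination skb may still
be extended (tcp_skb_can_collapse_to(), i.e. no EOR mark set). The
replacement helper additionally verifies that both skbs carry compatible
MPTCP state. Roughly, per include/net/tcp.h in this series (a sketch, not
the verbatim header):

    static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
    {
            return likely(!TCP_SKB_CB(skb)->eor);
    }

    static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
                                            const struct sk_buff *from)
    {
            /* the old EOR check, plus: both skbs must belong to the same
             * MPTCP data-sequence mapping, or to none at all
             */
            return likely(tcp_skb_can_collapse_to(to) &&
                          mptcp_skb_can_collapse(to, from));
    }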
@@ -3164,6 +3165,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                        tp->retransmit_skb_hint = NULL;
                if (unlikely(skb == tp->lost_skb_hint))
                        tp->lost_skb_hint = NULL;
+               tcp_highest_sack_replace(sk, skb, next);
                tcp_rtx_queue_unlink_and_free(skb, sk);
        }
 
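tcp_clean_rtx_queue() is about to unlink and free skb; if tp->highest_sack
still pointed at it, the socket would be left with a dangling hint. The new
call repoints the hint at the next skb in the rtx queue first (next is
computed earlier in this loop via skb_rb_next(skb)). The helper is tiny; per
include/net/tcp.h it is essentially:

    /* old skb is about to be deleted: move the highest-SACKed hint along */
    static inline void tcp_highest_sack_replace(struct sock *sk,
                                                struct sk_buff *old,
                                                struct sk_buff *new)
    {
            if (old == tcp_highest_sack(sk))
                    tcp_sk(sk)->highest_sack = new;
    }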
@@ -3554,7 +3556,7 @@ static void tcp_xmit_recovery(struct sock *sk, int rexmit)
        if (rexmit == REXMIT_NONE || sk->sk_state == TCP_SYN_SENT)
                return;
 
-       if (unlikely(rexmit == 2)) {
+       if (unlikely(rexmit == REXMIT_NEW)) {
                __tcp_push_pending_frames(sk, tcp_current_mss(sk),
                                          TCP_NAGLE_OFF);
                if (after(tp->snd_nxt, tp->high_seq))
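Same behavior, no magic number: REXMIT_NEW is the enumerator with value 2.
The enum is defined near the top of tcp_input.c, approximately:

    enum {
            REXMIT_NONE,    /* no loss recovery to do */
            REXMIT_LOST,    /* retransmit packets marked lost */
            REXMIT_NEW,     /* FRTO-style transmit of unsent/new packets */
    };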
@@ -3924,6 +3926,10 @@ void tcp_parse_options(const struct net *net,
                                 */
                                break;
 #endif
+                       case TCPOPT_MPTCP:
+                               mptcp_parse_option(skb, ptr, opsize, opt_rx);
+                               break;
+
                        case TCPOPT_FASTOPEN:
                                tcp_parse_fastopen_option(
                                        opsize - TCPOLEN_FASTOPEN_BASE,
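The new case slots MPTCP into the same kind/opsize dispatch as the other TCP
options; mptcp_parse_option() stashes what it finds in opt_rx (this series
adds an mptcp member to struct tcp_options_received for that purpose), to be
acted on later by mptcp_incoming_options() and mptcp_rcv_synsent() below.
For reference, the relevant option kinds (values per include/net/tcp.h):

    #define TCPOPT_SACK             5       /* SACK block */
    #define TCPOPT_MPTCP            30      /* Multipath TCP, RFC 8684 */
    #define TCPOPT_FASTOPEN         34      /* TCP Fast Open, RFC 7413 */
    #define TCPOPT_EXP              254     /* experimental options */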
@@ -4265,8 +4271,10 @@ static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
         * The receiver remembers and reflects via DSACKs. Leverage the
         * DSACK state and change the txhash to re-route speculatively.
         */
-       if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq)
+       if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq) {
                sk_rethink_txhash(sk);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
+       }
 }
 
 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
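The DSACK-triggered rehash used to be invisible; the branch now bumps a MIB
counter so the event can be observed (it should surface in /proc/net/netstat
as TCPDuplicateDataRehash, e.g. via nstat). The rehash itself remains a
one-liner; per include/net/sock.h of this era, roughly:

    static inline void sk_rethink_txhash(struct sock *sk)
    {
            if (sk->sk_txhash)
                    sk_set_txhash(sk);      /* pick a fresh random tx hash,
                                             * steering the flow onto a
                                             * (hopefully) different path
                                             */
    }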
@@ -4424,6 +4432,9 @@ static bool tcp_try_coalesce(struct sock *sk,
        if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
                return false;
 
+       if (!mptcp_skb_can_collapse(to, from))
+               return false;
+
 #ifdef CONFIG_TLS_DEVICE
        if (from->decrypted != to->decrypted)
                return false;
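tcp_try_coalesce() glues an in-sequence segment onto the tail of the receive
queue. On an MPTCP subflow that is only legal if both skbs belong to the
same data-sequence mapping, which travels in an skb extension. A sketch of
the check (the real helper lives in include/net/mptcp.h and compares the
SKB_EXT_MPTCP extensions of the two skbs):

    static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
                                              const struct sk_buff *from)
    {
            const struct mptcp_ext *to_ext = skb_ext_find(to, SKB_EXT_MPTCP);
            const struct mptcp_ext *from_ext = skb_ext_find(from, SKB_EXT_MPTCP);

            /* merging is fine if 'from' carries no mapping at all, or if
             * both skbs carry the identical mapping
             */
            return !from_ext ||
                   (to_ext && !memcmp(to_ext, from_ext, sizeof(*to_ext)));
    }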
@@ -4762,6 +4773,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
        bool fragstolen;
        int eaten;
 
+       if (sk_is_mptcp(sk))
+               mptcp_incoming_options(sk, skb, &tp->rx_opt);
+
        if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
                __kfree_skb(skb);
                return;
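tcp_parse_options() above only records MPTCP options; this is the point on
the receive path where they take effect (DSS mappings, DATA_FIN and the
like). sk_is_mptcp() keeps the cost for plain TCP sockets down to one flag
test, and compiles to false without CONFIG_MPTCP; per include/net/mptcp.h it
is essentially:

    static inline bool sk_is_mptcp(const struct sock *sk)
    {
            return tcp_sk(sk)->is_mptcp;    /* set once the subflow has
                                             * negotiated MPTCP */
    }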
@@ -4933,7 +4947,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
                /* The first skb to collapse is:
                 * - not SYN/FIN and
                 * - bloated or contains data before "start" or
-                *   overlaps to the next one.
+                *   overlaps with the next one and MPTCP allows collapsing.
                 */
                if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) &&
                    (tcp_win_from_space(sk, skb->truesize) > skb->len ||
@@ -4942,7 +4956,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
                        break;
                }
 
-               if (n && n != tail &&
+               if (n && n != tail && mptcp_skb_can_collapse(skb, n) &&
                    TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
                        end_of_skbs = false;
                        break;
@@ -4975,6 +4989,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
                else
                        __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
                skb_set_owner_r(nskb, sk);
+               mptcp_skb_ext_move(nskb, skb);
 
                /* Copy data, releasing collapsed skbs. */
                while (copy > 0) {
@@ -4994,6 +5009,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
                                skb = tcp_collapse_one(sk, skb, list, root);
                                if (!skb ||
                                    skb == tail ||
+                                   !mptcp_skb_can_collapse(nskb, skb) ||
                                    (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
                                        goto end;
 #ifdef CONFIG_TLS_DEVICE
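When the receive queue is collapsed into freshly allocated skbs, two MPTCP
rules apply: the data-sequence mapping extension must follow the payload
onto nskb (mptcp_skb_ext_move()), and copying must stop at a mapping
boundary (the added mptcp_skb_can_collapse() check). A sketch of the move
helper, assuming, as the caller guarantees, that nskb was just allocated and
owns no extensions of its own:

    static inline void mptcp_skb_ext_move(struct sk_buff *to,
                                          struct sk_buff *from)
    {
            if (!skb_ext_exist(from, SKB_EXT_MPTCP))
                    return;

            /* 'to' is brand new: steal the whole extension area rather
             * than taking another reference
             */
            to->active_extensions = from->active_extensions;
            to->extensions = from->extensions;
            from->active_extensions = 0;
    }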
@@ -5968,6 +5984,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
                tcp_initialize_rcv_mss(sk);
 
+               if (sk_is_mptcp(sk))
+                       mptcp_rcv_synsent(sk);
+
                /* Remember, tcp_poll() does not lock socket!
                 * Change state from SYN-SENT only after copied_seq
                 * is initialized. */
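On the client side, the SYN-ACK decides the fate of the MPTCP handshake:
either the peer echoed MP_CAPABLE and the subflow records its key, or the
connection silently falls back to plain TCP. mptcp_rcv_synsent() lives in
this series' net/mptcp code; conceptually (treat the field names as
approximate, this is a sketch):

    void mptcp_rcv_synsent(struct sock *sk)
    {
            struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
            struct tcp_sock *tp = tcp_sk(sk);

            if (subflow->request_mptcp && tp->rx_opt.mptcp.mp_capable) {
                    subflow->mp_capable = 1;
                    subflow->remote_key = tp->rx_opt.mptcp.sndr_key;
            } else if (subflow->request_mptcp) {
                    tp->is_mptcp = 0;       /* peer declined: plain TCP */
            }
    }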
@@ -6333,8 +6352,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
        case TCP_CLOSE_WAIT:
        case TCP_CLOSING:
        case TCP_LAST_ACK:
-               if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
+               if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
+                       if (sk_is_mptcp(sk))
+                               mptcp_incoming_options(sk, skb, &tp->rx_opt);
                        break;
+               }
                /* fall through */
        case TCP_FIN_WAIT1:
        case TCP_FIN_WAIT2:
@@ -6590,6 +6612,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 
        tcp_rsk(req)->af_specific = af_ops;
        tcp_rsk(req)->ts_off = 0;
+#if IS_ENABLED(CONFIG_MPTCP)
+       tcp_rsk(req)->is_mptcp = 0;
+#endif
 
        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = af_ops->mss_clamp;
@@ -6612,6 +6637,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 
        af_ops->init_req(req, sk, skb);
 
+       if (IS_ENABLED(CONFIG_MPTCP) && want_cookie)
+               tcp_rsk(req)->is_mptcp = 0;
+
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;
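Both hunks enforce the same rule: a request sock must never claim MPTCP
state it cannot back up. tcp_rsk(req)->is_mptcp is a new field on struct
tcp_request_sock (sketch, unrelated members elided):

    struct tcp_request_sock {
            struct inet_request_sock          req;
            const struct tcp_request_sock_ops *af_specific;
            /* ... */
    #if IS_ENABLED(CONFIG_MPTCP)
            bool                              is_mptcp;
    #endif
    };

The flag is cleared once before option parsing, and cleared again for
syncookie answers: af_ops->init_req() may have set it after seeing
MP_CAPABLE in the SYN, but a SYN cookie cannot encode MPTCP handshake state,
so a cookie-mode connection has to be answered as plain TCP.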