// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);
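
/* Note: ip_send_check() zeroes iph->check itself before recomputing, so a
 * caller that edits header fields in place only needs a single call
 * afterwards, e.g.:
 *
 *	iph->ttl = ttl;
 *	ip_send_check(iph);
 */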

int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}
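
/* __ip_local_out() returns the netfilter verdict: 1 means the packet may
 * continue (the caller must then hand it to dst_output()), 0 means a hook
 * stole or queued it, and negative values are errors.  ip_local_out()
 * below relies on this convention.
 */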

int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr    = saddr;
	iph->protocol = sk->sk_protocol;
	if (ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen>>2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	if (!skb->mark)
		skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
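
/* ip_build_and_send_pkt() is intended for replies built without a fully
 * established socket context; TCP, for instance, uses it on the SYN-ACK
 * path, where most header fields derive from the request rather than
 * from connection state.
 */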

static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		res = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}
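
/* ip_finish_output2() is the last IPv4-level step on the output path:
 * from here neigh_output() attaches the link-layer header (or reuses the
 * cached one) and hands the packet to the device queue.
 */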

static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	netdev_features_t features;
	struct sk_buff *segs;
	int ret = 0;

	/* common case: seglen is <= mtu
	 */
	if (skb_gso_validate_network_len(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
	 *  - Forwarding of an skb that arrived on a virtualization interface
	 *    (virtio-net/vhost/tap) with TSO/GSO size set by another network
	 *    stack.
	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
	 *    interface with a smaller MTU.
	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *    insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		skb_mark_not_on_list(segs);
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
		segs = nskb;
	} while (segs);

	return ret;
}

static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}

static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return __ip_finish_output(net, sk, skb);
	case NET_XMIT_CN:
		return __ip_finish_output(net, sk, skb) ? : ret;
	default:
		kfree_skb(skb);
		return ret;
	}
}
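
/* NET_XMIT_CN ("congestion notification") still transmits the packet: the
 * "?:" above propagates any real error from __ip_finish_output(), and
 * falls back to reporting the CN verdict when transmission itself
 * succeeded.
 */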

static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return dev_loopback_xmit(net, sk, skb);
	case NET_XMIT_CN:
		return dev_loopback_xmit(net, sk, skb) ? : ret;
	default:
		kfree_skb(skb);
		return ret;
	}
}

int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames,
		   which returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
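
/* NF_HOOK_COND() only traverses NF_INET_POST_ROUTING when the condition
 * holds: packets flagged IPSKB_REROUTED already passed that hook before
 * being rerouted, so they skip it here and go straight to
 * ip_finish_output().  The same condition guards ip_output() below.
 */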

int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to:
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
	memcpy(&iph->saddr, &fl4->saddr,
	       sizeof(fl4->saddr) + sizeof(fl4->daddr));
}
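
/* The single memcpy() above is safe because saddr and daddr are adjacent
 * in both struct iphdr (per the RFC 791 layout) and struct flowi4; the
 * BUILD_BUG_ON() turns any future reordering of flowi4 into a compile
 * error.
 */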

/* Note: skb->sk can be different from sk, in case of tunnels */
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS_TOS(sk, tos),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gw_family)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(__ip_queue_xmit);
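
/* The ip_queue_xmit() wrapper (a static inline in include/net/ip.h in
 * this tree) simply calls __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
 * the explicit tos argument here exists for callers such as RDS that
 * need a per-packet TOS.
 */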

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->skb_iif = from->skb_iif;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter)
{
	unsigned int first_len = skb_pagelen(skb);

	iter->frag_list = skb_shinfo(skb)->frag_list;
	iter->frag = iter->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->iph = iph;
	iter->hlen = hlen;

	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	iph->tot_len = htons(first_len);
	iph->frag_off = htons(IP_MF);
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_init);

static void ip_fraglist_ipcb_prepare(struct sk_buff *skb,
				     struct ip_fraglist_iter *iter)
{
	struct sk_buff *to = iter->frag;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(skb)->flags;

	if (iter->offset == 0)
		ip_options_fragment(to);
}

void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
	unsigned int hlen = iter->hlen;
	struct iphdr *iph = iter->iph;
	struct sk_buff *frag;

	frag = iter->frag;
	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iph, hlen);
	iter->iph = ip_hdr(frag);
	iph = iter->iph;
	iph->tot_len = htons(frag->len);
	ip_copy_metadata(frag, skb);
	iter->offset += skb->len - hlen;
	iph->frag_off = htons(iter->offset >> 3);
	if (frag->next)
		iph->frag_off |= htons(IP_MF);
	/* Ready, complete checksum */
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_prepare);
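
/* Fast-path usage (see ip_do_fragment() below): when the skb already
 * carries a valid frag_list, the iterator emits each list member as a
 * fragment without copying:
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag) {
 *			ip_fraglist_ipcb_prepare(skb, &iter);
 *			ip_fraglist_prepare(skb, &iter);
 *		}
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */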

void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
		  unsigned int ll_rs, unsigned int mtu,
		  struct ip_frag_state *state)
{
	struct iphdr *iph = ip_hdr(skb);

	state->hlen = hlen;
	state->ll_rs = ll_rs;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	state->not_last_frag = iph->frag_off & htons(IP_MF);
}
EXPORT_SYMBOL(ip_frag_init);

static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
			 bool first_frag, struct ip_frag_state *state)
{
	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

	/* When PMTU discovery forced this fragmentation, mark the fragment
	 * with DF.  Work on the fragment's own header (the state->iph
	 * pointer is not kept in sync with the fragment being built) and
	 * refresh the checksum, which ip_frag_next() has already finalised.
	 */
	if (IPCB(from)->flags & IPSKB_FRAG_PMTU) {
		ip_hdr(to)->frag_off |= htons(IP_DF);
		ip_send_check(ip_hdr(to));
	}

	/* ANK: dirty, but effective trick. Upgrade options only if
	 * the segment to be fragmented was THE FIRST (otherwise,
	 * options are already fixed) and make it ONCE
	 * on the initial skb, so that all the following fragments
	 * will inherit fixed options.
	 */
	if (first_frag)
		ip_options_fragment(from);
}

struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
{
	unsigned int len = state->left;
	struct sk_buff *skb2;
	struct iphdr *iph;

	/* IF: it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* IF: we are not sending up to and including the packet end
	   then align the next start on an eight byte boundary */
	if (len < state->left)	{
		len &= ~7;
	}

	/* Allocate buffer */
	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
	if (!skb2)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet
	 */

	ip_copy_metadata(skb2, skb);
	skb_reserve(skb2, state->ll_rs);
	skb_put(skb2, len + state->hlen);
	skb_reset_network_header(skb2);
	skb2->transport_header = skb2->network_header + state->hlen;

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess
	 */

	if (skb->sk)
		skb_set_owner_w(skb2, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */

	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);

	/*
	 *	Copy a block of the IP datagram.
	 */
	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
		BUG();
	state->left -= len;

	/*
	 *	Fill in the new header fields.
	 */
	iph = ip_hdr(skb2);
	iph->frag_off = htons((state->offset >> 3));

	/*
	 *	Added AC : If we are fragmenting a fragment that's not the
	 *		   last fragment then keep MF set on each fragment
	 */
	if (state->left > 0 || state->not_last_frag)
		iph->frag_off |= htons(IP_MF);
	state->ptr += len;
	state->offset += len;

	iph->tot_len = htons(len + state->hlen);

	ip_send_check(iph);

	return skb2;
}
EXPORT_SYMBOL(ip_frag_next);
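
/* Slow-path usage (see ip_do_fragment() below): each call to
 * ip_frag_next() allocates and fills one fragment by copying:
 *
 *	ip_frag_init(skb, hlen, ll_rs, mtu, &state);
 *	while (state.left > 0) {
 *		skb2 = ip_frag_next(skb, &state);
 *		if (IS_ERR(skb2))
 *			break;
 *		err = output(net, sk, skb2);
 *	}
 */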

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	struct sk_buff *skb2;
	struct rtable *rt = skb_rtable(skb);
	unsigned int mtu, hlen, ll_rs;
	struct ip_fraglist_iter iter;
	struct ip_frag_state state;
	int err = 0;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited. In this case fall back to
	 * copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen + ll_rs)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */
		ip_fraglist_init(skb, iph, hlen, &iter);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (iter.frag) {
				ip_fraglist_ipcb_prepare(skb, &iter);
				ip_fraglist_prepare(skb, &iter);
			}

			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !iter.frag)
				break;

			skb = ip_fraglist_next(&iter);
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(iter.frag_list);

		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */

	ip_frag_init(skb, hlen, ll_rs, mtu, &state);

	/*
	 *	Keep copying data until we run out.
	 */

	while (state.left > 0) {
		bool first_frag = (state.offset == 0);

		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto fail;
		}
		ip_frag_ipcb(skb, skb2, first_frag, &state);

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);

static int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
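
/* Contract for getfrag() callbacks: copy 'len' bytes starting at 'offset'
 * of the source 'from' into 'to', where 'odd' is the current byte parity
 * used for checksum folding; return 0 on success or a negative errno.
 * ip_generic_getfrag() above implements this for user iovecs, and
 * ip_reply_glue_bits() below does the same for kernel buffers.
 */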

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	unsigned int wmem_alloc_delta = 0;
	bool paged, extra_uref = false;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
	paged = !!cork->gso_size;

	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}
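
	/* Fragment geometry: maxfraglen rounds the payload of each
	 * non-final fragment down to a multiple of 8 bytes, as required by
	 * the IP fragment-offset field, then adds the header back in; mtu
	 * here still includes the header, so "mtu - fragheaderlen" is the
	 * usable payload per fragment.
	 */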

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
		csummode = CHECKSUM_PARTIAL;

	if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
		uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
		if (!uarg)
			return -ENOBUFS;
		extra_uref = !skb;	/* only extra ref if !MSG_MORE */
		if (rt->dst.dev->features & NETIF_F_SG &&
		    csummode == CHECKSUM_PARTIAL) {
			paged = true;
		} else {
			uarg->zerocopy = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);
		}
	}

	cork->length += length;

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each segment is an IP fragment ready for sending to the network
	 * after adding the appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			unsigned int pagedlen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;
			pagedlen = 0;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else if (!paged)
				alloclen = fraglen;
			else {
				alloclen = min_t(int, fraglen, MAX_HEADER);
				pagedlen = fraglen - alloclen;
			}

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen + hh_len + 15,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap - pagedlen;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= copy + transhdrlen;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/* only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue.
			 */
			if (!skb->destructor) {
				skb->destructor = sock_wfree;
				skb->sk = sk;
				wmem_alloc_delta += skb->truesize;
			}
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else if (!uarg || !uarg->zerocopy) {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			wmem_alloc_delta += copy;
		} else {
			err = skb_zerocopy_iter_dgram(skb, from, copy);
			if (err < 0)
				goto error;
		}
		offset += copy;
		length -= copy;
	}

	if (wmem_alloc_delta)
		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return 0;

error_efault:
	err = -EFAULT;
error:
	if (uarg)
		sock_zerocopy_put_abort(uarg, extra_uref);
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return err;
}

static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}

	/*
	 * We steal reference to this route, caller should not release it
	 */
	*rtp = NULL;
	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : rt->dst.dev->mtu;

	cork->gso_size = ipc->gso_size;
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->priority = ipc->priority;
	cork->transmit_time = ipc->sockc.transmit_time;
	cork->tx_flags = 0;
	sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);

	return 0;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}

ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + size > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	if (!skb)
		return -EINVAL;

	cork->length += size;

	while (size > 0) {
		/* Check if the remaining data fits into current packet. */
		len = mtu - skb->len;
		if (len < size)
			len = maxfraglen - skb->len;

		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (len > size)
			len = size;

		if (skb_append_pagefrags(skb, page, offset, len)) {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		refcount_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combined all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * fragmenting the frame generated here. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	skb->ignore_df = ip_sk_ignore_df(sk);

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (cork->ttl != 0)
		ttl = cork->ttl;
	else if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(net, skb, sk);

	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb->tstamp = cork->transmit_time;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}

int ip_send_skb(struct net *net, struct sk_buff *skb)
{
	int err;

	err = ip_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}
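
/* ip_local_out() can return positive NET_XMIT_* verdicts from the
 * queueing layer; net_xmit_errno() maps NET_XMIT_CN to 0 and the other
 * soft-drop verdicts to -ENOBUFS, so only real losses are counted as
 * OUTDISCARDS above.
 */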

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}
1562 * Throw away all pending data on the socket.
1564 static void __ip_flush_pending_frames(struct sock *sk,
1565 struct sk_buff_head *queue,
1566 struct inet_cork *cork)
1568 struct sk_buff *skb;
1570 while ((skb = __skb_dequeue_tail(queue)) != NULL)
1573 ip_cork_release(cork);

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags)
{
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork->flags = 0;
	cork->addr = 0;
	cork->opt = NULL;
	err = ip_setup_cork(sk, cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, cork);
}
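
/* ip_make_skb() is the corkless variant of ip_append_data() +
 * __ip_make_skb(): it cooks a complete datagram on a caller-provided
 * cork without touching sk->sk_write_queue, which is how, for example,
 * UDP builds single-shot packets without holding the cork lock.
 */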

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send some TCP resets/acks so far.
 */
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct sk_buff *nskb;
	int err;
	int oif;

	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
		return;

	ipcm_init(&ipc);
	ipc.addr = daddr;
	ipc.sockc.transmit_time = transmit_time;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	oif = arg->bound_dev_if;
	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
		oif = skb->skb_iif;

	flowi4_init_output(&fl4, oif,
			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			   arg->uid);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;

	inet_sk(sk)->tos = arg->tos;

	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_mark = fl4.flowi4_mark;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}
out:
	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST)
	igmp_mc_init();
#endif
}