/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
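/*
 * Example usage (illustrative, not part of the original sources): the action
 * is typically attached from userspace via the tc(8) CLI, e.g.
 *
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip protocol 17 0xff \
 *		action csum udp
 *
 * which recomputes the UDP checksum of every matching packet. The update
 * items tc accepts (ip4h, icmp, igmp, tcp, udp, udplite, sctp) map onto the
 * TCA_CSUM_UPDATE_FLAG_* bits handled below.
 */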
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>
static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};
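/*
 * Note (added): the netlink core validates attributes against this policy
 * before tcf_csum_init() runs, so a TCA_CSUM_PARMS payload shorter than
 * sizeof(struct tc_csum) is rejected up front.
 */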
static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind, bool rtnl_held, struct tcf_proto *tp,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (!err) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_csum_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind)	/* don't override defaults */
			return 0;
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p = to_tcf_csum(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	rcu_swap_protected(p->params, params_new,
			   lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params_new)
		kfree_rcu(params_new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}
/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next-layer header is available in the specified
 * sk_buff and make it writable.
 * Return a pointer to the next layer if the check passes, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}
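/*
 * Illustrative call pattern for the helper above (TCP shown as an example;
 * the real callers follow below):
 *
 *	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
 *	if (tcph == NULL)
 *		return 0;
 *
 * i.e. each per-protocol updater validates and makes writable its own header
 * before touching the checksum field.
 */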
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the payload length without a protocol check:
	 * UDPLITE uses udph->len as the checksum coverage, not the length.
	 * Use iph->tot_len, i.e. just ipl, instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}
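/*
 * Worked example for the coverage logic above (RFC 3828 background, added
 * as an aside): for UDP-Lite, udph->len is the checksum coverage, so
 * ul == 0 means "cover the whole datagram" (ipl - ihl bytes), while
 * 8 <= ul <= ipl - ihl checksums only the first ul bytes; anything else is
 * an obscure packet and is left untouched.
 */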
static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the payload length without a protocol check:
	 * UDPLITE uses udph->len as the checksum coverage, not the length.
	 * Use ip6h->payload_len + sizeof(*ip6h), i.e. just ipl, instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);
		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);
		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}
static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}
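/*
 * Background for the jumbo check above (RFC 2675, added as an aside): a
 * Jumbo Payload option is a 6-byte TLV (type, len = 4, then a 32-bit
 * payload length) that must sit at offset 4n + 2 within the header, which
 * is exactly what "optlen != 6 || len < 6 || (off & 3) != 2" rejects.
 */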
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}
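/*
 * Note (added): the handler below can see packets whose VLAN headers are
 * still in skb->data, e.g. eth / 802.1Q / 802.1Q / IPv4 / UDP (topology
 * illustrative). It peels one VLAN header per iteration with
 * skb_pull(skb, VLAN_HLEN), counting the pulls in vlan_hdr_count so the skb
 * can be restored before returning.
 */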
static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = tc_skb_protocol(skb);
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD): /* fall through */
	case cpu_to_be16(ETH_P_8021Q):
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
	action = TC_ACT_SHOT;
	goto out;
}
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}
static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_search(tn, a, index);
}
static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}
static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.id		= TCA_ID_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum_act,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.get_fill_size	= tcf_csum_get_fill_size,
	.size		= sizeof(struct tcf_csum),
};
static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}
static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};
MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");
static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}
static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}
module_init(csum_init_module);
module_exit(csum_cleanup_module);