// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/bottom_half.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>

#include "xfrm_inout.h"

struct xfrm_trans_tasklet {
	struct tasklet_struct tasklet;
	struct sk_buff_head queue;
};

struct xfrm_trans_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
	struct net *net;
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];

static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	if (WARN_ON(afinfo->family >= ARRAY_SIZE(xfrm_input_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);
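
/*
 * Illustrative sketch only (not part of the original file): a per-family
 * module registers a callback table roughly as below and unregisters it on
 * exit.  The names "example_rcv_cb" and "example_afinfo" are hypothetical;
 * the real users are the IPv4/IPv6 protocol glue.
 *
 *	static int example_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 *	{
 *		// dispatch to the matching protocol handler here
 *		return 0;
 *	}
 *
 *	static const struct xfrm_input_afinfo example_afinfo = {
 *		.family		= AF_INET,
 *		.callback	= example_rcv_cb,
 *	};
 *
 *	// module init:	xfrm_input_register_afinfo(&example_afinfo);
 *	// module exit:	xfrm_input_unregister_afinfo(&example_afinfo);
 */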

int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);

static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
{
	const struct xfrm_input_afinfo *afinfo;

	if (WARN_ON_ONCE(family >= ARRAY_SIZE(xfrm_input_afinfo)))
		return NULL;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_input_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
		       int err)
{
	int ret;
	const struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);

	if (!afinfo)
		return -EAFNOSUPPORT;

	ret = afinfo->callback(skb, protocol, err);
	rcu_read_unlock();

	return ret;
}

struct sec_path *secpath_set(struct sk_buff *skb)
{
	struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);

	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
	if (!sp)
		return NULL;

	if (tmp) /* reused existing one (was COW'd if needed) */
		return sp;

	/* allocated new secpath */
	memset(sp->ovec, 0, sizeof(sp->ovec));
	sp->olen = 0;
	sp->len = 0;

	return sp;
}
EXPORT_SYMBOL(secpath_set);
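
/*
 * Illustrative sketch only: receive paths install a secpath and then record
 * each decapsulated state on it, as xfrm_input() does later in this file:
 *
 *	struct sec_path *sp = secpath_set(skb);
 *
 *	if (!sp)
 *		return -ENOMEM;		// hypothetical caller's error path
 *	sp->xvec[sp->len++] = x;	// remember the xfrm_state just used
 */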

/* Fetch spi and seq from ipsec header */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);
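
/*
 * Illustrative sketch only: callers pull the SPI/sequence pair and feed the
 * SPI to the state lookup, much as xfrm_input() does below.  "net", "daddr"
 * and "x" are assumed to exist in the hypothetical caller:
 *
 *	__be32 spi, seq;
 *
 *	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
 *		goto drop;
 *	x = xfrm_state_lookup(net, skb->mark, daddr, spi, IPPROTO_ESP,
 *			      AF_INET);
 */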

static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	int optlen = 0;
	int err = -EINVAL;

	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
		struct ip_beet_phdr *ph;
		int phlen;

		if (!pskb_may_pull(skb, sizeof(*ph)))
			goto out;

		ph = (struct ip_beet_phdr *)skb->data;

		phlen = sizeof(*ph) + ph->padlen;
		optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
		if (optlen < 0 || optlen & 3 || optlen > 250)
			goto out;

		XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

		if (!pskb_may_pull(skb, phlen))
			goto out;
		__skb_pull(skb, phlen);
	}

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm4_beet_make_header(skb);

	iph = ip_hdr(skb);

	iph->ihl += optlen / 4;
	iph->tot_len = htons(skb->len);
	iph->daddr = x->sel.daddr.a4;
	iph->saddr = x->sel.saddr.a4;
	iph->check = 0;
	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
	err = 0;
out:
	return err;
}

static void ipip_ecn_decapsulate(struct sk_buff *skb)
{
	struct iphdr *inner_iph = ipip_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP_ECN_set_ce(inner_iph);
}

static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP6_ECN_set_ce(skb, inner_iph);
}

static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
		goto out;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)),
			       ipipv6_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip6_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *ip6h;
	int size = sizeof(struct ipv6hdr);
	int err;

	err = skb_cow_head(skb, size + skb->mac_len);
	if (err)
		goto out;

	__skb_push(skb, size);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm6_beet_make_header(skb);

	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(skb->len - size);
	ip6h->daddr = x->sel.daddr.in6;
	ip6h->saddr = x->sel.saddr.in6;
	err = 0;
out:
	return err;
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation
 * header.
 *
 * On entry, the transport header shall point to where the IP header
 * should be and the network header shall be set to where the IP
 * header currently is.  skb->data shall point to the start of the
 * payload.
 */
static int
xfrm_inner_mode_encap_remove(struct xfrm_state *x,
			     const struct xfrm_mode *inner_mode,
			     struct sk_buff *skb)
{
	switch (inner_mode->encap) {
	case XFRM_MODE_BEET:
		if (inner_mode->family == AF_INET)
			return xfrm4_remove_beet_encap(x, skb);
		if (inner_mode->family == AF_INET6)
			return xfrm6_remove_beet_encap(x, skb);
		break;
	case XFRM_MODE_TUNNEL:
		if (inner_mode->family == AF_INET)
			return xfrm4_remove_tunnel_encap(x, skb);
		if (inner_mode->family == AF_INET6)
			return xfrm6_remove_tunnel_encap(x, skb);
		break;
	}

	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
}

static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	const struct xfrm_mode *inner_mode = &x->inner_mode;
	const struct xfrm_state_afinfo *afinfo;
	int err = -EAFNOSUPPORT;

	rcu_read_lock();
	afinfo = xfrm_state_afinfo_get_rcu(x->outer_mode.family);
	if (likely(afinfo))
		err = afinfo->extract_input(x, skb);
	rcu_read_unlock();

	if (err)
		return err;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (!inner_mode)
			return -EAFNOSUPPORT;
	}

	switch (inner_mode->family) {
	case AF_INET:
		skb->protocol = htons(ETH_P_IP);
		break;
	case AF_INET6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return xfrm_inner_mode_encap_remove(x, inner_mode, skb);
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation header.
 *
 * On entry, skb_transport_header() shall point to where the IP header
 * should be and skb_network_header() shall be set to where the IP header
 * currently is.  skb->data shall point to the start of the payload.
 */
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		skb->network_header = skb->transport_header;
	}
	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
	skb_reset_transport_header(skb);
	return 0;
}

static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		skb->network_header = skb->transport_header;
	}
	ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
					   sizeof(struct ipv6hdr));
	skb_reset_transport_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

static int xfrm_inner_mode_input(struct xfrm_state *x,
				 const struct xfrm_mode *inner_mode,
				 struct sk_buff *skb)
{
	switch (inner_mode->encap) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		return xfrm_prepare_input(x, skb);
	case XFRM_MODE_TRANSPORT:
		if (inner_mode->family == AF_INET)
			return xfrm4_transport_input(x, skb);
		if (inner_mode->family == AF_INET6)
			return xfrm6_transport_input(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		WARN_ON_ONCE(1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}

int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	const struct xfrm_state_afinfo *afinfo;
	struct net *net = dev_net(skb->dev);
	const struct xfrm_mode *inner_mode;
	int err;
	__be32 seq;
	__be32 seq_hi = 0;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	u32 mark = skb->mark;
	unsigned int family = AF_UNSPEC;
	int decaps = 0;
	int async = 0;
	bool xfrm_gro = false;
	bool crypto_done = false;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (encap_type < 0) {
		x = xfrm_input_state(skb);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);

			if (encap_type == -1)
				dev_put(skb->dev);
			goto drop;
		}

		family = x->outer_mode.family;

		/* An encap_type of -1 indicates async resumption. */
		if (encap_type == -1) {
			async = 1;
			seq = XFRM_SKB_CB(skb)->seq.input.low;
			goto resume;
		}

		/* encap_type < -1 indicates a GRO call. */
		encap_type = 0;
		seq = XFRM_SPI_SKB_CB(skb)->seq;

		if (xo && (xo->flags & CRYPTO_DONE)) {
			crypto_done = true;
			family = XFRM_SPI_SKB_CB(skb)->family;

			if (!(xo->status & CRYPTO_SUCCESS)) {
				if (xo->status &
				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {

					xfrm_audit_state_icvfail(x, skb,
								 x->type->proto);
					x->stats.integrity_failed++;
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto drop;
			}

			if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
				goto drop;
			}
		}

		goto lock;
	}

	family = XFRM_SPI_SKB_CB(skb)->family;

	/* if tunnel is present override skb->mark value with tunnel i_key */
	switch (family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
		break;
	}

	sp = secpath_set(skb);
	if (!sp) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
		goto drop;
	}

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		secpath_reset(skb);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	do {
		sp = skb_sec_path(skb);

		if (sp->len == XFRM_MAX_DEPTH) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}

lock:
		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		dev_hold(skb->dev);

		if (crypto_done)
			nexthdr = x->type_offload->input_tail(x, skb);
		else
			nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;

resume:
		dev_put(skb->dev);
		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (async && x->repl->recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = &x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
				goto drop;
			}
		}

		if (xfrm_inner_mode_input(x, inner_mode, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode.family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
		crypto_done = false;
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset_ct(skb);

	if (decaps) {
		sp = skb_sec_path(skb);
		if (sp)
			sp->olen = 0;
		skb_dst_drop(skb);
		gro_cells_receive(&gro_cells, skb);
		return 0;
	} else {
		xo = xfrm_offload(skb);
		if (xo)
			xfrm_gro = xo->flags & XFRM_GRO;

		err = -EAFNOSUPPORT;
		rcu_read_lock();
		afinfo = xfrm_state_afinfo_get_rcu(x->inner_mode.family);
		if (likely(afinfo))
			err = afinfo->transport_finish(skb, xfrm_gro || async);
		rcu_read_unlock();
		if (xfrm_gro) {
			sp = skb_sec_path(skb);
			if (sp)
				sp->olen = 0;
			skb_dst_drop(skb);
			gro_cells_receive(&gro_cells, skb);
			return err;
		}

		return err;
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);
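
/*
 * Illustrative sketch only: protocol glue reaches xfrm_input() with a zero
 * SPI and encap_type 0 for a plain receive; the IPv4 path, for instance,
 * effectively does:
 *
 *	return xfrm_input(skb, ip_hdr(skb)->protocol, 0, 0);
 *
 * Negative encap_type values are reserved: -1 for async resumption and
 * values below -1 for GRO, as handled at the top of xfrm_input().
 */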

int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);
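
/*
 * Illustrative sketch only, modeled on the ESP input path: an async crypto
 * completion callback resumes processing with the result of its
 * post-decryption step ("example_input_done2" is hypothetical):
 *
 *	static void example_input_done(struct crypto_async_request *base,
 *				       int err)
 *	{
 *		struct sk_buff *skb = base->data;
 *
 *		xfrm_input_resume(skb, example_input_done2(skb, err));
 *	}
 */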

static void xfrm_trans_reinject(unsigned long data)
{
	struct xfrm_trans_tasklet *trans = (void *)data;
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&trans->queue, &queue);

	while ((skb = __skb_dequeue(&queue)))
		XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
					       NULL, skb);
}

int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
			 int (*finish)(struct net *, struct sock *,
				       struct sk_buff *))
{
	struct xfrm_trans_tasklet *trans;

	trans = this_cpu_ptr(&xfrm_trans_tasklet);

	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
		return -ENOBUFS;

	BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));

	XFRM_TRANS_SKB_CB(skb)->finish = finish;
	XFRM_TRANS_SKB_CB(skb)->net = net;
	__skb_queue_tail(&trans->queue, skb);
	tasklet_schedule(&trans->tasklet);
	return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue_net);

int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *))
{
	return xfrm_trans_queue_net(dev_net(skb->dev), skb, finish);
}
EXPORT_SYMBOL(xfrm_trans_queue);
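
/*
 * Illustrative sketch only: a caller that must not complete reception in
 * its current context defers the final step to the per-CPU tasklet
 * ("example_finish" is a hypothetical finish callback):
 *
 *	static int example_finish(struct net *net, struct sock *sk,
 *				  struct sk_buff *skb)
 *	{
 *		return dst_input(skb);
 *	}
 *
 *	// instead of calling example_finish(net, NULL, skb) directly:
 *	if (xfrm_trans_queue_net(net, skb, example_finish))
 *		kfree_skb(skb);
 */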

void __init xfrm_input_init(void)
{
	int err;
	int i;

	init_dummy_netdev(&xfrm_napi_dev);
	err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
	if (err)
		gro_cells.cells = NULL;

	for_each_possible_cpu(i) {
		struct xfrm_trans_tasklet *trans;

		trans = &per_cpu(xfrm_trans_tasklet, i);
		__skb_queue_head_init(&trans->queue);
		tasklet_init(&trans->tasklet, xfrm_trans_reinject,
			     (unsigned long)trans);
	}
}