6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * Kazunori MIYAZAWA @USAGI
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/cpu.h>
28 #include <linux/audit.h>
33 #ifdef CONFIG_XFRM_STATISTICS
37 #include "xfrm_hash.h"
39 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
40 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
41 #define XFRM_MAX_QUEUE_LEN 100
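/* Queued packets are retried with exponential backoff: the hold timer
 * starts at XFRM_QUEUE_TMO_MIN and is doubled on every expiry in
 * xfrm_policy_queue_process() below until clamped at XFRM_QUEUE_TMO_MAX.
 * For illustration, with HZ == 1000 that is 100ms, 200ms, 400ms, ...
 * capped at 60s, while the hold queue itself is bounded by
 * XFRM_MAX_QUEUE_LEN skbs.
 */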
44 struct dst_entry *dst_orig;
48 static DEFINE_PER_CPU(struct xfrm_dst *, xfrm_last_dst);
49 static struct work_struct *xfrm_pcpu_work __read_mostly;
50 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
51 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
54 static struct kmem_cache *xfrm_dst_cache __read_mostly;
55 static __read_mostly seqcount_t xfrm_policy_hash_generation;
57 static void xfrm_init_pmtu(struct dst_entry *dst);
58 static int stale_bundle(struct dst_entry *dst);
59 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
60 static void xfrm_policy_queue_process(struct timer_list *t);
62 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
63 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
66 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
68 return refcount_inc_not_zero(&policy->refcnt);
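/* A note on the RCU lookup contract: refcount_inc_not_zero() returns
 * false once the refcount has already dropped to zero (the policy is
 * being freed), so a reader that loses this race must treat the policy
 * as gone; see its use in xfrm_policy_lookup_bytype() below.
 */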
72 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
74 const struct flowi4 *fl4 = &fl->u.ip4;
76 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
77 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
78 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
79 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
80 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
81 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
85 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
87 const struct flowi6 *fl6 = &fl->u.ip6;
89 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
90 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
91 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
92 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
93 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
94 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
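/* Both selector matchers above use the same mask-and-xor idiom for
 * ports: !((port ^ sel->port) & sel->port_mask) is true exactly when
 * the masked bits of the flow's port equal the selector's. Sketching
 * the two common cases (ports in network byte order):
 *
 *	dport_mask == 0xffff  ->  exact port match required
 *	dport_mask == 0x0000  ->  any port matches (the XOR is masked away)
 */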
97 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
98 unsigned short family)
102 return __xfrm4_selector_match(sel, fl);
104 return __xfrm6_selector_match(sel, fl);
109 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
111 const struct xfrm_policy_afinfo *afinfo;
113 if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
116 afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
117 if (unlikely(!afinfo))
122 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
123 const xfrm_address_t *saddr,
124 const xfrm_address_t *daddr,
125 int family, u32 mark)
127 const struct xfrm_policy_afinfo *afinfo;
128 struct dst_entry *dst;
130 afinfo = xfrm_policy_get_afinfo(family);
131 if (unlikely(afinfo == NULL))
132 return ERR_PTR(-EAFNOSUPPORT);
134 dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
140 EXPORT_SYMBOL(__xfrm_dst_lookup);
142 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
144 xfrm_address_t *prev_saddr,
145 xfrm_address_t *prev_daddr,
146 int family, u32 mark)
148 struct net *net = xs_net(x);
149 xfrm_address_t *saddr = &x->props.saddr;
150 xfrm_address_t *daddr = &x->id.daddr;
151 struct dst_entry *dst;
153 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
157 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
162 dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
165 if (prev_saddr != saddr)
166 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
167 if (prev_daddr != daddr)
168 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
174 static inline unsigned long make_jiffies(long secs)
176 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
177 return MAX_SCHEDULE_TIMEOUT-1;
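/* make_jiffies() clamps a timeout given in seconds so that the
 * conversion to jiffies cannot exceed MAX_SCHEDULE_TIMEOUT - 1; the
 * elided fall-through path presumably returns secs * HZ unclamped.
 */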
182 static void xfrm_policy_timer(struct timer_list *t)
184 struct xfrm_policy *xp = from_timer(xp, t, timer);
185 unsigned long now = get_seconds();
186 long next = LONG_MAX;
190 read_lock(&xp->lock);
192 if (unlikely(xp->walk.dead))
195 dir = xfrm_policy_id2dir(xp->index);
197 if (xp->lft.hard_add_expires_seconds) {
198 long tmo = xp->lft.hard_add_expires_seconds +
199 xp->curlft.add_time - now;
205 if (xp->lft.hard_use_expires_seconds) {
206 long tmo = xp->lft.hard_use_expires_seconds +
207 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
213 if (xp->lft.soft_add_expires_seconds) {
214 long tmo = xp->lft.soft_add_expires_seconds +
215 xp->curlft.add_time - now;
218 tmo = XFRM_KM_TIMEOUT;
223 if (xp->lft.soft_use_expires_seconds) {
224 long tmo = xp->lft.soft_use_expires_seconds +
225 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
228 tmo = XFRM_KM_TIMEOUT;
235 km_policy_expired(xp, dir, 0, 0);
236 if (next != LONG_MAX &&
237 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
241 read_unlock(&xp->lock);
246 read_unlock(&xp->lock);
247 if (!xfrm_policy_delete(xp, dir))
248 km_policy_expired(xp, dir, 1, 0);
252 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
256 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
258 struct xfrm_policy *policy;
260 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
263 write_pnet(&policy->xp_net, net);
264 INIT_LIST_HEAD(&policy->walk.all);
265 INIT_HLIST_NODE(&policy->bydst);
266 INIT_HLIST_NODE(&policy->byidx);
267 rwlock_init(&policy->lock);
268 refcount_set(&policy->refcnt, 1);
269 skb_queue_head_init(&policy->polq.hold_queue);
270 timer_setup(&policy->timer, xfrm_policy_timer, 0);
271 timer_setup(&policy->polq.hold_timer,
272 xfrm_policy_queue_process, 0);
276 EXPORT_SYMBOL(xfrm_policy_alloc);
278 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
280 struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
282 security_xfrm_policy_free(policy->security);
286 /* Destroy xfrm_policy: descendant resources must have been released by this point. */
288 void xfrm_policy_destroy(struct xfrm_policy *policy)
290 BUG_ON(!policy->walk.dead);
292 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
295 call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
297 EXPORT_SYMBOL(xfrm_policy_destroy);
299 /* Rule must be locked. Release descendant resources, announce
300  * entry dead. The rule must already be unlinked from the lists.
303 static void xfrm_policy_kill(struct xfrm_policy *policy)
305 policy->walk.dead = 1;
307 atomic_inc(&policy->genid);
309 if (del_timer(&policy->polq.hold_timer))
310 xfrm_pol_put(policy);
311 skb_queue_purge(&policy->polq.hold_queue);
313 if (del_timer(&policy->timer))
314 xfrm_pol_put(policy);
316 xfrm_pol_put(policy);
319 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
321 static inline unsigned int idx_hash(struct net *net, u32 index)
323 return __idx_hash(index, net->xfrm.policy_idx_hmask);
326 /* calculate policy hash thresholds */
327 static void __get_hash_thresh(struct net *net,
328 unsigned short family, int dir,
329 u8 *dbits, u8 *sbits)
333 *dbits = net->xfrm.policy_bydst[dir].dbits4;
334 *sbits = net->xfrm.policy_bydst[dir].sbits4;
338 *dbits = net->xfrm.policy_bydst[dir].dbits6;
339 *sbits = net->xfrm.policy_bydst[dir].sbits6;
348 static struct hlist_head *policy_hash_bysel(struct net *net,
349 const struct xfrm_selector *sel,
350 unsigned short family, int dir)
352 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
357 __get_hash_thresh(net, family, dir, &dbits, &sbits);
358 hash = __sel_hash(sel, family, hmask, dbits, sbits);
360 if (hash == hmask + 1)
361 return &net->xfrm.policy_inexact[dir];
363 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
364 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
367 static struct hlist_head *policy_hash_direct(struct net *net,
368 const xfrm_address_t *daddr,
369 const xfrm_address_t *saddr,
370 unsigned short family, int dir)
372 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
377 __get_hash_thresh(net, family, dir, &dbits, &sbits);
378 hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
380 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
381 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
384 static void xfrm_dst_hash_transfer(struct net *net,
385 struct hlist_head *list,
386 struct hlist_head *ndsttable,
387 unsigned int nhashmask,
390 struct hlist_node *tmp, *entry0 = NULL;
391 struct xfrm_policy *pol;
397 hlist_for_each_entry_safe(pol, tmp, list, bydst) {
400 __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
401 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
402 pol->family, nhashmask, dbits, sbits);
404 hlist_del_rcu(&pol->bydst);
405 hlist_add_head_rcu(&pol->bydst, ndsttable + h);
410 hlist_del_rcu(&pol->bydst);
411 hlist_add_behind_rcu(&pol->bydst, entry0);
413 entry0 = &pol->bydst;
415 if (!hlist_empty(list)) {
421 static void xfrm_idx_hash_transfer(struct hlist_head *list,
422 struct hlist_head *nidxtable,
423 unsigned int nhashmask)
425 struct hlist_node *tmp;
426 struct xfrm_policy *pol;
428 hlist_for_each_entry_safe(pol, tmp, list, byidx) {
431 h = __idx_hash(pol->index, nhashmask);
432 hlist_add_head(&pol->byidx, nidxtable+h);
436 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
438 return ((old_hmask + 1) << 1) - 1;
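/* Every resize doubles the table while keeping the mask in 2^n - 1
 * form, so hash values reduce with a cheap AND: e.g. an old mask of 7
 * (8 buckets) becomes ((7 + 1) << 1) - 1 = 15 (16 buckets).
 */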
441 static void xfrm_bydst_resize(struct net *net, int dir)
443 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
444 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
445 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
446 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
447 struct hlist_head *odst;
453 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
454 write_seqcount_begin(&xfrm_policy_hash_generation);
456 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
457 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
459 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
460 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
462 for (i = hmask; i >= 0; i--)
463 xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
465 rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
466 net->xfrm.policy_bydst[dir].hmask = nhashmask;
468 write_seqcount_end(&xfrm_policy_hash_generation);
469 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
473 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
476 static void xfrm_byidx_resize(struct net *net, int total)
478 unsigned int hmask = net->xfrm.policy_idx_hmask;
479 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
480 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
481 struct hlist_head *oidx = net->xfrm.policy_byidx;
482 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
488 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
490 for (i = hmask; i >= 0; i--)
491 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
493 net->xfrm.policy_byidx = nidx;
494 net->xfrm.policy_idx_hmask = nhashmask;
496 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
498 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
501 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
503 unsigned int cnt = net->xfrm.policy_count[dir];
504 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
509 if ((hmask + 1) < xfrm_policy_hashmax &&
516 static inline int xfrm_byidx_should_resize(struct net *net, int total)
518 unsigned int hmask = net->xfrm.policy_idx_hmask;
520 if ((hmask + 1) < xfrm_policy_hashmax &&
527 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
529 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
530 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
531 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
532 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
533 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
534 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
535 si->spdhcnt = net->xfrm.policy_idx_hmask;
536 si->spdhmcnt = xfrm_policy_hashmax;
538 EXPORT_SYMBOL(xfrm_spd_getinfo);
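/* Note the indexing convention above: policy_count[dir] holds the
 * per-direction IN/OUT/FWD totals, while the same counters offset by
 * XFRM_POLICY_MAX (inscnt/outscnt/fwdscnt) count per-socket policies,
 * which xfrm_sk_policy_link() files under XFRM_POLICY_MAX + dir
 * further below.
 */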
540 static DEFINE_MUTEX(hash_resize_mutex);
541 static void xfrm_hash_resize(struct work_struct *work)
543 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
546 mutex_lock(&hash_resize_mutex);
549 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
550 if (xfrm_bydst_should_resize(net, dir, &total))
551 xfrm_bydst_resize(net, dir);
553 if (xfrm_byidx_should_resize(net, total))
554 xfrm_byidx_resize(net, total);
556 mutex_unlock(&hash_resize_mutex);
559 static void xfrm_hash_rebuild(struct work_struct *work)
561 struct net *net = container_of(work, struct net,
562 xfrm.policy_hthresh.work);
564 struct xfrm_policy *pol;
565 struct xfrm_policy *policy;
566 struct hlist_head *chain;
567 struct hlist_head *odst;
568 struct hlist_node *newpos;
572 u8 lbits4, rbits4, lbits6, rbits6;
574 mutex_lock(&hash_resize_mutex);
576 /* read selector prefixlen thresholds */
578 seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
580 lbits4 = net->xfrm.policy_hthresh.lbits4;
581 rbits4 = net->xfrm.policy_hthresh.rbits4;
582 lbits6 = net->xfrm.policy_hthresh.lbits6;
583 rbits6 = net->xfrm.policy_hthresh.rbits6;
584 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
586 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
588 /* reset the bydst and inexact table in all directions */
589 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
590 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
591 hmask = net->xfrm.policy_bydst[dir].hmask;
592 odst = net->xfrm.policy_bydst[dir].table;
593 for (i = hmask; i >= 0; i--)
594 INIT_HLIST_HEAD(odst + i);
595 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
596 /* dir out => dst = remote, src = local */
597 net->xfrm.policy_bydst[dir].dbits4 = rbits4;
598 net->xfrm.policy_bydst[dir].sbits4 = lbits4;
599 net->xfrm.policy_bydst[dir].dbits6 = rbits6;
600 net->xfrm.policy_bydst[dir].sbits6 = lbits6;
602 /* dir in/fwd => dst = local, src = remote */
603 net->xfrm.policy_bydst[dir].dbits4 = lbits4;
604 net->xfrm.policy_bydst[dir].sbits4 = rbits4;
605 net->xfrm.policy_bydst[dir].dbits6 = lbits6;
606 net->xfrm.policy_bydst[dir].sbits6 = rbits6;
610 /* re-insert all policies by order of creation */
611 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
612 if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
613 /* skip socket policies */
617 chain = policy_hash_bysel(net, &policy->selector,
619 xfrm_policy_id2dir(policy->index));
620 hlist_for_each_entry(pol, chain, bydst) {
621 if (policy->priority >= pol->priority)
622 newpos = &pol->bydst;
627 hlist_add_behind(&policy->bydst, newpos);
629 hlist_add_head(&policy->bydst, chain);
632 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
634 mutex_unlock(&hash_resize_mutex);
637 void xfrm_policy_hash_rebuild(struct net *net)
639 schedule_work(&net->xfrm.policy_hthresh.work);
641 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
643 /* Generate new index... KAME seems to generate them ordered by cost
644  * of an absolute unpredictability of the ordering of rules. This will not pass. */
645 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
647 static u32 idx_generator;
650 struct hlist_head *list;
651 struct xfrm_policy *p;
656 idx = (idx_generator | dir);
665 list = net->xfrm.policy_byidx + idx_hash(net, idx);
667 hlist_for_each_entry(p, list, byidx) {
668 if (p->index == idx) {
678 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
680 u32 *p1 = (u32 *) s1;
681 u32 *p2 = (u32 *) s2;
682 int len = sizeof(struct xfrm_selector) / sizeof(u32);
685 for (i = 0; i < len; i++) {
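/* selector_cmp() compares the two selectors word by word as u32
 * arrays, i.e. a memcmp() over the whole struct; the elided loop body
 * presumably returns non-zero at the first differing word. This only
 * treats selectors as equal when any struct padding is zeroed the
 * same way on both sides.
 */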
693 static void xfrm_policy_requeue(struct xfrm_policy *old,
694 struct xfrm_policy *new)
696 struct xfrm_policy_queue *pq = &old->polq;
697 struct sk_buff_head list;
699 if (skb_queue_empty(&pq->hold_queue))
702 __skb_queue_head_init(&list);
704 spin_lock_bh(&pq->hold_queue.lock);
705 skb_queue_splice_init(&pq->hold_queue, &list);
706 if (del_timer(&pq->hold_timer))
708 spin_unlock_bh(&pq->hold_queue.lock);
712 spin_lock_bh(&pq->hold_queue.lock);
713 skb_queue_splice(&list, &pq->hold_queue);
714 pq->timeout = XFRM_QUEUE_TMO_MIN;
715 if (!mod_timer(&pq->hold_timer, jiffies))
717 spin_unlock_bh(&pq->hold_queue.lock);
720 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
721 struct xfrm_policy *pol)
723 u32 mark = policy->mark.v & policy->mark.m;
725 if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
728 if ((mark & pol->mark.m) == pol->mark.v &&
729 policy->priority == pol->priority)
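/* Marks match either when the (value, mask) pairs are identical, or
 * when the new policy's effective mark satisfies the existing
 * policy's filter and the priorities agree. E.g. with pol->mark.v ==
 * 0x1 and pol->mark.m == 0xf, any mark whose low nibble is 1 passes
 * the second test.
 */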
735 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
737 struct net *net = xp_net(policy);
738 struct xfrm_policy *pol;
739 struct xfrm_policy *delpol;
740 struct hlist_head *chain;
741 struct hlist_node *newpos;
743 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
744 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
747 hlist_for_each_entry(pol, chain, bydst) {
748 if (pol->type == policy->type &&
749 !selector_cmp(&pol->selector, &policy->selector) &&
750 xfrm_policy_mark_match(policy, pol) &&
751 xfrm_sec_ctx_match(pol->security, policy->security) &&
754 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
758 if (policy->priority > pol->priority)
760 } else if (policy->priority >= pol->priority) {
761 newpos = &pol->bydst;
768 hlist_add_behind(&policy->bydst, newpos);
770 hlist_add_head(&policy->bydst, chain);
771 __xfrm_policy_link(policy, dir);
773 /* After the previous checks, family can only be AF_INET or AF_INET6 */
774 if (policy->family == AF_INET)
775 rt_genid_bump_ipv4(net);
777 rt_genid_bump_ipv6(net);
780 xfrm_policy_requeue(delpol, policy);
781 __xfrm_policy_unlink(delpol, dir);
783 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
784 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
785 policy->curlft.add_time = get_seconds();
786 policy->curlft.use_time = 0;
787 if (!mod_timer(&policy->timer, jiffies + HZ))
788 xfrm_pol_hold(policy);
789 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
792 xfrm_policy_kill(delpol);
793 else if (xfrm_bydst_should_resize(net, dir, NULL))
794 schedule_work(&net->xfrm.policy_hash_work);
798 EXPORT_SYMBOL(xfrm_policy_insert);
800 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
801 int dir, struct xfrm_selector *sel,
802 struct xfrm_sec_ctx *ctx, int delete,
805 struct xfrm_policy *pol, *ret;
806 struct hlist_head *chain;
809 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
810 chain = policy_hash_bysel(net, sel, sel->family, dir);
812 hlist_for_each_entry(pol, chain, bydst) {
813 if (pol->type == type &&
814 (mark & pol->mark.m) == pol->mark.v &&
815 !selector_cmp(sel, &pol->selector) &&
816 xfrm_sec_ctx_match(ctx, pol->security)) {
819 *err = security_xfrm_policy_delete(
822 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
825 __xfrm_policy_unlink(pol, dir);
831 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
834 xfrm_policy_kill(ret);
837 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
839 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
840 int dir, u32 id, int delete, int *err)
842 struct xfrm_policy *pol, *ret;
843 struct hlist_head *chain;
846 if (xfrm_policy_id2dir(id) != dir)
850 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
851 chain = net->xfrm.policy_byidx + idx_hash(net, id);
853 hlist_for_each_entry(pol, chain, byidx) {
854 if (pol->type == type && pol->index == id &&
855 (mark & pol->mark.m) == pol->mark.v) {
858 *err = security_xfrm_policy_delete(
861 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
864 __xfrm_policy_unlink(pol, dir);
870 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
873 xfrm_policy_kill(ret);
876 EXPORT_SYMBOL(xfrm_policy_byid);
878 #ifdef CONFIG_SECURITY_NETWORK_XFRM
880 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
884 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
885 struct xfrm_policy *pol;
888 hlist_for_each_entry(pol,
889 &net->xfrm.policy_inexact[dir], bydst) {
890 if (pol->type != type)
892 err = security_xfrm_policy_delete(pol->security);
894 xfrm_audit_policy_delete(pol, 0, task_valid);
898 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
899 hlist_for_each_entry(pol,
900 net->xfrm.policy_bydst[dir].table + i,
902 if (pol->type != type)
904 err = security_xfrm_policy_delete(
907 xfrm_audit_policy_delete(pol, 0,
918 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
924 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
926 int dir, err = 0, cnt = 0;
928 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
930 err = xfrm_policy_flush_secctx_check(net, type, task_valid);
934 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
935 struct xfrm_policy *pol;
939 hlist_for_each_entry(pol,
940 &net->xfrm.policy_inexact[dir], bydst) {
941 if (pol->type != type)
943 __xfrm_policy_unlink(pol, dir);
944 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
947 xfrm_audit_policy_delete(pol, 1, task_valid);
949 xfrm_policy_kill(pol);
951 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
955 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
957 hlist_for_each_entry(pol,
958 net->xfrm.policy_bydst[dir].table + i,
960 if (pol->type != type)
962 __xfrm_policy_unlink(pol, dir);
963 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
966 xfrm_audit_policy_delete(pol, 1, task_valid);
967 xfrm_policy_kill(pol);
969 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
978 xfrm_policy_cache_flush();
980 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
983 EXPORT_SYMBOL(xfrm_policy_flush);
985 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
986 int (*func)(struct xfrm_policy *, int, int, void*),
989 struct xfrm_policy *pol;
990 struct xfrm_policy_walk_entry *x;
993 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
994 walk->type != XFRM_POLICY_TYPE_ANY)
997 if (list_empty(&walk->walk.all) && walk->seq != 0)
1000 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1001 if (list_empty(&walk->walk.all))
1002 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1004 x = list_first_entry(&walk->walk.all,
1005 struct xfrm_policy_walk_entry, all);
1007 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1010 pol = container_of(x, struct xfrm_policy, walk);
1011 if (walk->type != XFRM_POLICY_TYPE_ANY &&
1012 walk->type != pol->type)
1014 error = func(pol, xfrm_policy_id2dir(pol->index),
1017 list_move_tail(&walk->walk.all, &x->all);
1022 if (walk->seq == 0) {
1026 list_del_init(&walk->walk.all);
1028 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1031 EXPORT_SYMBOL(xfrm_policy_walk);
1033 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1035 INIT_LIST_HEAD(&walk->walk.all);
1036 walk->walk.dead = 1;
1040 EXPORT_SYMBOL(xfrm_policy_walk_init);
1042 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1044 if (list_empty(&walk->walk.all))
1047 spin_lock_bh(&net->xfrm.xfrm_policy_lock); /* FIXME: where is net? */
1048 list_del(&walk->walk.all);
1049 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1051 EXPORT_SYMBOL(xfrm_policy_walk_done);
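/* Walks are resumable: the walker embeds its own list entry, marked
 * dead in xfrm_policy_walk_init() so that concurrent walkers can
 * presumably skip it, and xfrm_policy_walk() splices that entry after
 * the last policy visited (the list_move_tail() above). A later call
 * then resumes the iteration even if policies came and went in
 * between.
 */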
1054 * Find policy to apply to this flow.
1056  * Returns 0 if a policy is found, otherwise a negative errno.
1058 static int xfrm_policy_match(const struct xfrm_policy *pol,
1059 const struct flowi *fl,
1060 u8 type, u16 family, int dir)
1062 const struct xfrm_selector *sel = &pol->selector;
1066 if (pol->family != family ||
1067 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1071 match = xfrm_selector_match(sel, fl, family);
1073 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1079 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
1080 const struct flowi *fl,
1084 struct xfrm_policy *pol, *ret;
1085 const xfrm_address_t *daddr, *saddr;
1086 struct hlist_head *chain;
1087 unsigned int sequence;
1090 daddr = xfrm_flowi_daddr(fl, family);
1091 saddr = xfrm_flowi_saddr(fl, family);
1092 if (unlikely(!daddr || !saddr))
1098 sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
1099 chain = policy_hash_direct(net, daddr, saddr, family, dir);
1100 } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
1104 hlist_for_each_entry_rcu(pol, chain, bydst) {
1105 err = xfrm_policy_match(pol, fl, type, family, dir);
1115 priority = ret->priority;
1119 chain = &net->xfrm.policy_inexact[dir];
1120 hlist_for_each_entry_rcu(pol, chain, bydst) {
1121 if ((pol->priority >= priority) && ret)
1124 err = xfrm_policy_match(pol, fl, type, family, dir);
1138 if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
1141 if (ret && !xfrm_pol_hold_rcu(ret))
1149 static struct xfrm_policy *
1150 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
1152 #ifdef CONFIG_XFRM_SUB_POLICY
1153 struct xfrm_policy *pol;
1155 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
1159 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1162 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1163 const struct flowi *fl, u16 family)
1165 struct xfrm_policy *pol;
1169 pol = rcu_dereference(sk->sk_policy[dir]);
1174 if (pol->family != family) {
1179 match = xfrm_selector_match(&pol->selector, fl, family);
1181 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1185 err = security_xfrm_policy_lookup(pol->security,
1189 if (!xfrm_pol_hold_rcu(pol))
1191 } else if (err == -ESRCH) {
1204 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1206 struct net *net = xp_net(pol);
1208 list_add(&pol->walk.all, &net->xfrm.policy_all);
1209 net->xfrm.policy_count[dir]++;
1213 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1216 struct net *net = xp_net(pol);
1218 if (list_empty(&pol->walk.all))
1221 /* Socket policies are not hashed. */
1222 if (!hlist_unhashed(&pol->bydst)) {
1223 hlist_del_rcu(&pol->bydst);
1224 hlist_del(&pol->byidx);
1227 list_del_init(&pol->walk.all);
1228 net->xfrm.policy_count[dir]--;
1233 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
1235 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
1238 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
1240 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
1243 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1245 struct net *net = xp_net(pol);
1247 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1248 pol = __xfrm_policy_unlink(pol, dir);
1249 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1251 xfrm_policy_kill(pol);
1256 EXPORT_SYMBOL(xfrm_policy_delete);
1258 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1260 struct net *net = xp_net(pol);
1261 struct xfrm_policy *old_pol;
1263 #ifdef CONFIG_XFRM_SUB_POLICY
1264 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1268 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1269 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
1270 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
1272 pol->curlft.add_time = get_seconds();
1273 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1274 xfrm_sk_policy_link(pol, dir);
1276 rcu_assign_pointer(sk->sk_policy[dir], pol);
1279 xfrm_policy_requeue(old_pol, pol);
1281 /* Unlinking always succeeds. This is the only function
1282 * allowed to delete or replace socket policy.
1284 xfrm_sk_policy_unlink(old_pol, dir);
1286 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1289 xfrm_policy_kill(old_pol);
1294 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1296 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1297 struct net *net = xp_net(old);
1300 newp->selector = old->selector;
1301 if (security_xfrm_policy_clone(old->security,
1304 return NULL; /* ENOMEM */
1306 newp->lft = old->lft;
1307 newp->curlft = old->curlft;
1308 newp->mark = old->mark;
1309 newp->action = old->action;
1310 newp->flags = old->flags;
1311 newp->xfrm_nr = old->xfrm_nr;
1312 newp->index = old->index;
1313 newp->type = old->type;
1314 newp->family = old->family;
1315 memcpy(newp->xfrm_vec, old->xfrm_vec,
1316 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1317 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1318 xfrm_sk_policy_link(newp, dir);
1319 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1325 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1327 const struct xfrm_policy *p;
1328 struct xfrm_policy *np;
1332 for (i = 0; i < 2; i++) {
1333 p = rcu_dereference(osk->sk_policy[i]);
1335 np = clone_policy(p, i);
1336 if (unlikely(!np)) {
1340 rcu_assign_pointer(sk->sk_policy[i], np);
1348 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
1349 xfrm_address_t *remote, unsigned short family, u32 mark)
1352 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1354 if (unlikely(afinfo == NULL))
1356 err = afinfo->get_saddr(net, oif, local, remote, mark);
1361 /* Resolve the list of templates for the flow, given the policy. */
1364 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1365 struct xfrm_state **xfrm, unsigned short family)
1367 struct net *net = xp_net(policy);
1370 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1371 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1374 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
1375 struct xfrm_state *x;
1376 xfrm_address_t *remote = daddr;
1377 xfrm_address_t *local = saddr;
1378 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1380 if (tmpl->mode == XFRM_MODE_TUNNEL ||
1381 tmpl->mode == XFRM_MODE_BEET) {
1382 remote = &tmpl->id.daddr;
1383 local = &tmpl->saddr;
1384 if (xfrm_addr_any(local, tmpl->encap_family)) {
1385 error = xfrm_get_saddr(net, fl->flowi_oif,
1387 tmpl->encap_family, 0);
1394 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1396 if (x && x->km.state == XFRM_STATE_VALID) {
1403 error = (x->km.state == XFRM_STATE_ERROR ?
1406 } else if (error == -ESRCH) {
1410 if (!tmpl->optional)
1416 for (nx--; nx >= 0; nx--)
1417 xfrm_state_put(xfrm[nx]);
1422 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1423 struct xfrm_state **xfrm, unsigned short family)
1425 struct xfrm_state *tp[XFRM_MAX_DEPTH];
1426 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1432 for (i = 0; i < npols; i++) {
1433 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1438 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1446 /* found states are sorted for outbound processing */
1448 xfrm_state_sort(xfrm, tpp, cnx, family);
1453 for (cnx--; cnx >= 0; cnx--)
1454 xfrm_state_put(tpp[cnx]);
1459 static int xfrm_get_tos(const struct flowi *fl, int family)
1461 const struct xfrm_policy_afinfo *afinfo;
1464 afinfo = xfrm_policy_get_afinfo(family);
1465 tos = afinfo ? afinfo->get_tos(fl) : 0;
1472 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1474 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1475 struct dst_ops *dst_ops;
1476 struct xfrm_dst *xdst;
1479 return ERR_PTR(-EINVAL);
1483 dst_ops = &net->xfrm.xfrm4_dst_ops;
1485 #if IS_ENABLED(CONFIG_IPV6)
1487 dst_ops = &net->xfrm.xfrm6_dst_ops;
1493 xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
1496 struct dst_entry *dst = &xdst->u.dst;
1498 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1500 xdst = ERR_PTR(-ENOBUFS);
1507 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1510 const struct xfrm_policy_afinfo *afinfo =
1511 xfrm_policy_get_afinfo(dst->ops->family);
1517 err = afinfo->init_path(path, dst, nfheader_len);
1524 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1525 const struct flowi *fl)
1527 const struct xfrm_policy_afinfo *afinfo =
1528 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1534 err = afinfo->fill_dst(xdst, dev, fl);
1542 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
1543  * all the metrics... In short, bundle a bundle.
1546 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1547 struct xfrm_state **xfrm, int nx,
1548 const struct flowi *fl,
1549 struct dst_entry *dst)
1551 struct net *net = xp_net(policy);
1552 unsigned long now = jiffies;
1553 struct net_device *dev;
1554 struct xfrm_mode *inner_mode;
1555 struct dst_entry *dst_prev = NULL;
1556 struct dst_entry *dst0 = NULL;
1560 int nfheader_len = 0;
1561 int trailer_len = 0;
1563 int family = policy->selector.family;
1564 xfrm_address_t saddr, daddr;
1566 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1568 tos = xfrm_get_tos(fl, family);
1572 for (; i < nx; i++) {
1573 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1574 struct dst_entry *dst1 = &xdst->u.dst;
1576 err = PTR_ERR(xdst);
1585 /* Ref count is taken during xfrm_alloc_dst()
1586 * No need to do dst_clone() on dst1
1588 dst_prev->child = dst1;
1590 if (xfrm[i]->sel.family == AF_UNSPEC) {
1591 inner_mode = xfrm_ip2inner_mode(xfrm[i],
1592 xfrm_af2proto(family));
1594 err = -EAFNOSUPPORT;
1599 inner_mode = xfrm[i]->inner_mode;
1602 dst_copy_metrics(dst1, dst);
1604 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1605 family = xfrm[i]->props.family;
1606 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
1607 &saddr, &daddr, family,
1608 xfrm[i]->props.output_mark);
1615 dst1->xfrm = xfrm[i];
1616 xdst->xfrm_genid = xfrm[i]->genid;
1618 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1619 dst1->flags |= DST_HOST;
1620 dst1->lastuse = now;
1622 dst1->input = dst_discard;
1623 dst1->output = inner_mode->afinfo->output;
1625 dst1->next = dst_prev;
1628 header_len += xfrm[i]->props.header_len;
1629 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1630 nfheader_len += xfrm[i]->props.header_len;
1631 trailer_len += xfrm[i]->props.trailer_len;
1634 dst_prev->child = dst;
1642 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1643 xfrm_init_pmtu(dst_prev);
1645 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1646 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1648 err = xfrm_fill_dst(xdst, dev, fl);
1652 dst_prev->header_len = header_len;
1653 dst_prev->trailer_len = trailer_len;
1654 header_len -= xdst->u.dst.xfrm->props.header_len;
1655 trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1663 xfrm_state_put(xfrm[i]);
1666 dst_release_immediate(dst0);
1667 dst0 = ERR_PTR(err);
1671 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1672 struct xfrm_policy **pols,
1673 int *num_pols, int *num_xfrms)
1677 if (*num_pols == 0 || !pols[0]) {
1682 if (IS_ERR(pols[0]))
1683 return PTR_ERR(pols[0]);
1685 *num_xfrms = pols[0]->xfrm_nr;
1687 #ifdef CONFIG_XFRM_SUB_POLICY
1688 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1689 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1690 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1691 XFRM_POLICY_TYPE_MAIN,
1695 if (IS_ERR(pols[1])) {
1696 xfrm_pols_put(pols, *num_pols);
1697 return PTR_ERR(pols[1]);
1700 (*num_xfrms) += pols[1]->xfrm_nr;
1704 for (i = 0; i < *num_pols; i++) {
1705 if (pols[i]->action != XFRM_POLICY_ALLOW) {
1715 static void xfrm_last_dst_update(struct xfrm_dst *xdst, struct xfrm_dst *old)
1717 this_cpu_write(xfrm_last_dst, xdst);
1719 dst_release(&old->u.dst);
1722 static void __xfrm_pcpu_work_fn(void)
1724 struct xfrm_dst *old;
1726 old = this_cpu_read(xfrm_last_dst);
1727 if (old && !xfrm_bundle_ok(old))
1728 xfrm_last_dst_update(NULL, old);
1731 static void xfrm_pcpu_work_fn(struct work_struct *work)
1735 __xfrm_pcpu_work_fn();
1740 void xfrm_policy_cache_flush(void)
1742 struct xfrm_dst *old;
1748 for_each_possible_cpu(cpu) {
1749 old = per_cpu(xfrm_last_dst, cpu);
1750 if (old && !xfrm_bundle_ok(old)) {
1751 if (smp_processor_id() == cpu) {
1752 __xfrm_pcpu_work_fn();
1768 for_each_possible_cpu(cpu) {
1769 bool bundle_release;
1772 old = per_cpu(xfrm_last_dst, cpu);
1773 bundle_release = old && !xfrm_bundle_ok(old);
1776 if (!bundle_release)
1779 if (cpu_online(cpu)) {
1780 schedule_work_on(cpu, &xfrm_pcpu_work[cpu]);
1785 old = per_cpu(xfrm_last_dst, cpu);
1786 if (old && !xfrm_bundle_ok(old)) {
1787 per_cpu(xfrm_last_dst, cpu) = NULL;
1788 dst_release(&old->u.dst);
1796 static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst,
1797 struct xfrm_state * const xfrm[],
1800 const struct dst_entry *dst = &xdst->u.dst;
1803 if (xdst->num_xfrms != num)
1806 for (i = 0; i < num; i++) {
1807 if (!dst || dst->xfrm != xfrm[i])
1812 return xfrm_bundle_ok(xdst);
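/* The cached bundle may be reused only when it references exactly the
 * same chain of states: the count must match and every level of the
 * dst chain must carry the corresponding xfrm_state (the chain
 * descent is elided above), and the bundle as a whole must still pass
 * xfrm_bundle_ok().
 */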
1815 static struct xfrm_dst *
1816 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1817 const struct flowi *fl, u16 family,
1818 struct dst_entry *dst_orig)
1820 struct net *net = xp_net(pols[0]);
1821 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1822 struct xfrm_dst *xdst, *old;
1823 struct dst_entry *dst;
1826 /* Try to instantiate a bundle */
1827 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1829 if (err != 0 && err != -EAGAIN)
1830 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1831 return ERR_PTR(err);
1834 xdst = this_cpu_read(xfrm_last_dst);
1836 xdst->u.dst.dev == dst_orig->dev &&
1837 xdst->num_pols == num_pols &&
1838 memcmp(xdst->pols, pols,
1839 sizeof(struct xfrm_policy *) * num_pols) == 0 &&
1840 xfrm_xdst_can_reuse(xdst, xfrm, err)) {
1841 dst_hold(&xdst->u.dst);
1842 xfrm_pols_put(pols, num_pols);
1844 xfrm_state_put(xfrm[--err]);
1850 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1852 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1853 return ERR_CAST(dst);
1856 xdst = (struct xfrm_dst *)dst;
1857 xdst->num_xfrms = err;
1858 xdst->num_pols = num_pols;
1859 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1860 xdst->policy_genid = atomic_read(&pols[0]->genid);
1862 atomic_set(&xdst->u.dst.__refcnt, 2);
1863 xfrm_last_dst_update(xdst, old);
1868 static void xfrm_policy_queue_process(struct timer_list *t)
1870 struct sk_buff *skb;
1872 struct dst_entry *dst;
1873 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
1874 struct net *net = xp_net(pol);
1875 struct xfrm_policy_queue *pq = &pol->polq;
1877 struct sk_buff_head list;
1879 spin_lock(&pq->hold_queue.lock);
1880 skb = skb_peek(&pq->hold_queue);
1882 spin_unlock(&pq->hold_queue.lock);
1887 xfrm_decode_session(skb, &fl, dst->ops->family);
1888 spin_unlock(&pq->hold_queue.lock);
1890 dst_hold(dst->path);
1891 dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
1895 if (dst->flags & DST_XFRM_QUEUE) {
1898 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1901 pq->timeout = pq->timeout << 1;
1902 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1909 __skb_queue_head_init(&list);
1911 spin_lock(&pq->hold_queue.lock);
1913 skb_queue_splice_init(&pq->hold_queue, &list);
1914 spin_unlock(&pq->hold_queue.lock);
1916 while (!skb_queue_empty(&list)) {
1917 skb = __skb_dequeue(&list);
1919 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1920 dst_hold(skb_dst(skb)->path);
1921 dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
1929 skb_dst_set(skb, dst);
1931 dst_output(net, skb->sk, skb);
1940 skb_queue_purge(&pq->hold_queue);
1944 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
1946 unsigned long sched_next;
1947 struct dst_entry *dst = skb_dst(skb);
1948 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1949 struct xfrm_policy *pol = xdst->pols[0];
1950 struct xfrm_policy_queue *pq = &pol->polq;
1952 if (unlikely(skb_fclone_busy(sk, skb))) {
1957 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1964 spin_lock_bh(&pq->hold_queue.lock);
1967 pq->timeout = XFRM_QUEUE_TMO_MIN;
1969 sched_next = jiffies + pq->timeout;
1971 if (del_timer(&pq->hold_timer)) {
1972 if (time_before(pq->hold_timer.expires, sched_next))
1973 sched_next = pq->hold_timer.expires;
1977 __skb_queue_tail(&pq->hold_queue, skb);
1978 if (!mod_timer(&pq->hold_timer, sched_next))
1981 spin_unlock_bh(&pq->hold_queue.lock);
1986 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
1987 struct xfrm_flo *xflo,
1988 const struct flowi *fl,
1993 struct net_device *dev;
1994 struct dst_entry *dst;
1995 struct dst_entry *dst1;
1996 struct xfrm_dst *xdst;
1998 xdst = xfrm_alloc_dst(net, family);
2002 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2003 net->xfrm.sysctl_larval_drop ||
2007 dst = xflo->dst_orig;
2008 dst1 = &xdst->u.dst;
2012 dst_copy_metrics(dst1, dst);
2014 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2015 dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
2016 dst1->lastuse = jiffies;
2018 dst1->input = dst_discard;
2019 dst1->output = xdst_queue_output;
2025 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2032 err = xfrm_fill_dst(xdst, dev, fl);
2041 xdst = ERR_PTR(err);
2045 static struct xfrm_dst *
2046 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, struct xfrm_flo *xflo)
2048 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2049 int num_pols = 0, num_xfrms = 0, err;
2050 struct xfrm_dst *xdst;
2052 /* Resolve policies to use if we couldn't get them from
2053  * the previous cache entry */
2055 pols[0] = xfrm_policy_lookup(net, fl, family, dir);
2056 err = xfrm_expand_policies(fl, family, pols,
2057 &num_pols, &num_xfrms);
2063 goto make_dummy_bundle;
2065 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2068 err = PTR_ERR(xdst);
2071 goto make_dummy_bundle;
2072 } else if (xdst == NULL) {
2074 goto make_dummy_bundle;
2080 /* We found policies, but there are no bundles to instantiate:
2081  * either because the policy blocks, has no transformations, or
2082  * we could not build a template (no xfrm_states). */
2083 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2085 xfrm_pols_put(pols, num_pols);
2086 return ERR_CAST(xdst);
2088 xdst->num_pols = num_pols;
2089 xdst->num_xfrms = num_xfrms;
2090 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2095 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2097 xfrm_pols_put(pols, num_pols);
2098 return ERR_PTR(err);
2101 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2102 struct dst_entry *dst_orig)
2104 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2105 struct dst_entry *ret;
2108 dst_release(dst_orig);
2109 return ERR_PTR(-EINVAL);
2111 ret = afinfo->blackhole_route(net, dst_orig);
2118 /* Main function: finds/creates a bundle for a given flow.
2120 * At the moment we eat a raw IP route. Mostly to speed up lookups
2121 * on interfaces with disabled IPsec.
2123 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2124 const struct flowi *fl,
2125 const struct sock *sk, int flags)
2127 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2128 struct xfrm_dst *xdst;
2129 struct dst_entry *dst, *route;
2130 u16 family = dst_orig->ops->family;
2131 u8 dir = XFRM_POLICY_OUT;
2132 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2138 sk = sk_const_to_full_sk(sk);
2139 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2141 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
2142 err = xfrm_expand_policies(fl, family, pols,
2143 &num_pols, &num_xfrms);
2148 if (num_xfrms <= 0) {
2149 drop_pols = num_pols;
2153 xdst = xfrm_resolve_and_create_bundle(
2157 xfrm_pols_put(pols, num_pols);
2158 err = PTR_ERR(xdst);
2160 } else if (xdst == NULL) {
2162 drop_pols = num_pols;
2166 route = xdst->route;
2171 struct xfrm_flo xflo;
2173 xflo.dst_orig = dst_orig;
2176 /* To accelerate a bit... */
2177 if ((dst_orig->flags & DST_NOXFRM) ||
2178 !net->xfrm.policy_count[XFRM_POLICY_OUT])
2181 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo);
2185 err = PTR_ERR(xdst);
2189 num_pols = xdst->num_pols;
2190 num_xfrms = xdst->num_xfrms;
2191 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2192 route = xdst->route;
2196 if (route == NULL && num_xfrms > 0) {
2197 /* The only case when xfrm_bundle_lookup() returns a
2198  * bundle with a null route is when the template could
2199  * not be resolved. It means the policies are there, but
2200  * the bundle could not be created, since we don't yet
2201  * have the xfrm_states. We need to wait for the KM to
2202  * negotiate new SAs or bail out with an error. */
2203 if (net->xfrm.sysctl_larval_drop) {
2204 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2211 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2219 if ((flags & XFRM_LOOKUP_ICMP) &&
2220 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2225 for (i = 0; i < num_pols; i++)
2226 pols[i]->curlft.use_time = get_seconds();
2228 if (num_xfrms < 0) {
2229 /* Prohibit the flow */
2230 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2233 } else if (num_xfrms > 0) {
2234 /* Flow transformed */
2235 dst_release(dst_orig);
2237 /* Flow passes untransformed */
2242 xfrm_pols_put(pols, drop_pols);
2243 if (dst && dst->xfrm &&
2244 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2245 dst->flags |= DST_XFRM_TUNNEL;
2249 if (!(flags & XFRM_LOOKUP_ICMP)) {
2257 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
2258 dst_release(dst_orig);
2259 xfrm_pols_put(pols, drop_pols);
2260 return ERR_PTR(err);
2262 EXPORT_SYMBOL(xfrm_lookup);
2264 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
2265 * Otherwise we may send out blackholed packets.
2267 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2268 const struct flowi *fl,
2269 const struct sock *sk, int flags)
2271 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
2272 flags | XFRM_LOOKUP_QUEUE |
2273 XFRM_LOOKUP_KEEP_DST_REF);
2275 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2276 return make_blackhole(net, dst_orig->ops->family, dst_orig);
2280 EXPORT_SYMBOL(xfrm_lookup_route);
2283 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2285 struct xfrm_state *x;
2287 if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2289 x = skb->sp->xvec[idx];
2290 if (!x->type->reject)
2292 return x->type->reject(x, skb, fl);
2295 /* When skb is transformed back to its "native" form, we have to
2296  * check policy restrictions. At the moment we do this in a maximally
2297  * stupid way. Shame on me. :-) Of course, connected sockets must
2298  * have the policy cached at them.
2302 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2303 unsigned short family)
2305 if (xfrm_state_kern(x))
2306 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2307 return x->id.proto == tmpl->id.proto &&
2308 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2309 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2310 x->props.mode == tmpl->mode &&
2311 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2312 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2313 !(x->props.mode != XFRM_MODE_TRANSPORT &&
2314 xfrm_state_addr_cmp(tmpl, x, family));
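/* In other words: a kernel-internal (kern) state can only satisfy an
 * optional template, while an ordinary state must agree with the
 * template on proto, SPI (unless wildcarded), reqid and mode, pass
 * the auth-algorithm bitmask check, and, for non-transport modes,
 * match the template's addresses.
 */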
2318  * Zero or a positive value is returned when validation succeeds (either a
2319  * bypass because of an optional transport-mode template, or the next index
2320  * of the secpath state matched against the template).
2321 * -1 is returned when no matching template is found.
2322 * Otherwise "-2 - errored_index" is returned.
2325 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2326 unsigned short family)
2330 if (tmpl->optional) {
2331 if (tmpl->mode == XFRM_MODE_TRANSPORT)
2335 for (; idx < sp->len; idx++) {
2336 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2338 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2347 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2348 unsigned int family, int reverse)
2350 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2353 if (unlikely(afinfo == NULL))
2354 return -EAFNOSUPPORT;
2356 afinfo->decode_session(skb, fl, reverse);
2357 err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2361 EXPORT_SYMBOL(__xfrm_decode_session);
2363 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2365 for (; k < sp->len; k++) {
2366 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2375 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2376 unsigned short family)
2378 struct net *net = dev_net(skb->dev);
2379 struct xfrm_policy *pol;
2380 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2388 reverse = dir & ~XFRM_POLICY_MASK;
2389 dir &= XFRM_POLICY_MASK;
2391 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2392 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2396 nf_nat_decode_session(skb, &fl, family);
2398 /* First, check the used SAs against their selectors. */
2402 for (i = skb->sp->len-1; i >= 0; i--) {
2403 struct xfrm_state *x = skb->sp->xvec[i];
2404 if (!xfrm_selector_match(&x->sel, &fl, family)) {
2405 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2412 sk = sk_to_full_sk(sk);
2413 if (sk && sk->sk_policy[dir]) {
2414 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
2416 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2422 pol = xfrm_policy_lookup(net, &fl, family, dir);
2425 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2430 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2431 xfrm_secpath_reject(xerr_idx, skb, &fl);
2432 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2438 pol->curlft.use_time = get_seconds();
2442 #ifdef CONFIG_XFRM_SUB_POLICY
2443 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2444 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2448 if (IS_ERR(pols[1])) {
2449 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2452 pols[1]->curlft.use_time = get_seconds();
2458 if (pol->action == XFRM_POLICY_ALLOW) {
2459 struct sec_path *sp;
2460 static struct sec_path dummy;
2461 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2462 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2463 struct xfrm_tmpl **tpp = tp;
2467 if ((sp = skb->sp) == NULL)
2470 for (pi = 0; pi < npols; pi++) {
2471 if (pols[pi] != pol &&
2472 pols[pi]->action != XFRM_POLICY_ALLOW) {
2473 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2476 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2477 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2480 for (i = 0; i < pols[pi]->xfrm_nr; i++)
2481 tpp[ti++] = &pols[pi]->xfrm_vec[i];
2485 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2489 /* For each tunnel xfrm, find the first matching tmpl.
2490  * For each tmpl before that, find the corresponding xfrm.
2491  * Order is _important_. Later we will implement
2492  * some barriers, but at the moment barriers
2493  * are implied between every two transformations.
2495 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2496 k = xfrm_policy_ok(tpp[i], sp, k, family);
2499 /* "-2 - errored_index" returned */
2501 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2506 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2507 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2511 xfrm_pols_put(pols, npols);
2514 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2517 xfrm_secpath_reject(xerr_idx, skb, &fl);
2519 xfrm_pols_put(pols, npols);
2522 EXPORT_SYMBOL(__xfrm_policy_check);
2524 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2526 struct net *net = dev_net(skb->dev);
2528 struct dst_entry *dst;
2531 if (xfrm_decode_session(skb, &fl, family) < 0) {
2532 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2538 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
2543 skb_dst_set(skb, dst);
2546 EXPORT_SYMBOL(__xfrm_route_forward);
2548 /* Optimize later using cookies and generation ids. */
2550 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2552 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2553 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2554 * get validated by dst_ops->check on every use. We do this
2555 * because when a normal route referenced by an XFRM dst is
2556 * obsoleted we do not go looking around for all parent
2557 * referencing XFRM dsts so that we can invalidate them. It
2558 * is just too much work. Instead we make the checks here on
2559 * every use. For example:
2561 * XFRM dst A --> IPv4 dst X
2563 * X is the "xdst->route" of A (X is also the "dst->path" of A
2564 * in this example). If X is marked obsolete, "A" will not
2565 * notice. That's what we are validating here via the
2566 * stale_bundle() check.
2568  * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
2569  * be marked on it.
2570 * This will force stale_bundle() to fail on any xdst bundle with
2571 * this dst linked in it.
2573 if (dst->obsolete < 0 && !stale_bundle(dst))
2579 static int stale_bundle(struct dst_entry *dst)
2581 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2584 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2586 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2587 dst->dev = dev_net(dev)->loopback_dev;
2592 EXPORT_SYMBOL(xfrm_dst_ifdown);
2594 static void xfrm_link_failure(struct sk_buff *skb)
2596 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
2599 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2602 if (dst->obsolete) {
2610 static void xfrm_init_pmtu(struct dst_entry *dst)
2613 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2614 u32 pmtu, route_mtu_cached;
2616 pmtu = dst_mtu(dst->child);
2617 xdst->child_mtu_cached = pmtu;
2619 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2621 route_mtu_cached = dst_mtu(xdst->route);
2622 xdst->route_mtu_cached = route_mtu_cached;
2624 if (pmtu > route_mtu_cached)
2625 pmtu = route_mtu_cached;
2627 dst_metric_set(dst, RTAX_MTU, pmtu);
2628 } while ((dst = dst->next));
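/* Each level therefore ends up with
 * MTU = min(xfrm_state_mtu(state, child_mtu), route_mtu): the inner
 * dst's MTU is first reduced by the state's header/trailer overhead
 * and then capped by the MTU of the route carrying the encapsulated
 * packets.
 */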
2631 /* Check that the bundle accepts the flow and its components are
2635 static int xfrm_bundle_ok(struct xfrm_dst *first)
2637 struct dst_entry *dst = &first->u.dst;
2638 struct xfrm_dst *last;
2641 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2642 (dst->dev && !netif_running(dst->dev)))
2645 if (dst->flags & DST_XFRM_QUEUE)
2651 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2653 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2655 if (xdst->xfrm_genid != dst->xfrm->genid)
2657 if (xdst->num_pols > 0 &&
2658 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2661 mtu = dst_mtu(dst->child);
2662 if (xdst->child_mtu_cached != mtu) {
2664 xdst->child_mtu_cached = mtu;
2667 if (!dst_check(xdst->route, xdst->route_cookie))
2669 mtu = dst_mtu(xdst->route);
2670 if (xdst->route_mtu_cached != mtu) {
2672 xdst->route_mtu_cached = mtu;
2676 } while (dst->xfrm);
2681 mtu = last->child_mtu_cached;
2685 mtu = xfrm_state_mtu(dst->xfrm, mtu);
2686 if (mtu > last->route_mtu_cached)
2687 mtu = last->route_mtu_cached;
2688 dst_metric_set(dst, RTAX_MTU, mtu);
2693 last = (struct xfrm_dst *)last->u.dst.next;
2694 last->child_mtu_cached = mtu;
2700 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2702 return dst_metric_advmss(dst->path);
2705 static unsigned int xfrm_mtu(const struct dst_entry *dst)
2707 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2709 return mtu ? : dst_mtu(dst->path);
2712 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
2715 const struct dst_entry *path = dst->path;
2717 for (; dst != path; dst = dst->child) {
2718 const struct xfrm_state *xfrm = dst->xfrm;
2720 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
2722 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
2723 daddr = xfrm->coaddr;
2724 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
2725 daddr = &xfrm->id.daddr;
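/* Walking from the bundle head towards the path, every non-transport
 * state may override the next hop: a state flagged
 * XFRM_TYPE_REMOTE_COADDR supplies its care-of address (xfrm->coaddr),
 * otherwise the tunnel endpoint xfrm->id.daddr is used, unless the
 * state only rewrites the local side (XFRM_TYPE_LOCAL_COADDR). The
 * last override on the chain wins.
 */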
2730 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2731 struct sk_buff *skb,
2734 const struct dst_entry *path = dst->path;
2737 daddr = xfrm_get_dst_nexthop(dst, daddr);
2738 return path->ops->neigh_lookup(path, skb, daddr);
2741 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
2743 const struct dst_entry *path = dst->path;
2745 daddr = xfrm_get_dst_nexthop(dst, daddr);
2746 path->ops->confirm_neigh(path, daddr);
int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
{
	int err = 0;

	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[family] != NULL))
		err = -EEXIST;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		/* Fill in any dst_ops hooks the address family left NULL
		 * with the generic xfrm implementations.
		 */
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(!dst_ops->confirm_neigh))
			dst_ops->confirm_neigh = xfrm_confirm_neigh;
		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

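/* Illustration only: an address family registers its afinfo from its own
 * init path.  The sketch below is modeled loosely on net/ipv4/xfrm4_policy.c;
 * the xfrm4_* names and the field set shown are abridged assumptions, not a
 * verbatim copy of that file.
 *
 *	static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.dst_ops	 = &xfrm4_dst_ops_template,
 *		.dst_lookup	 = xfrm4_dst_lookup,
 *		.get_saddr	 = xfrm4_get_saddr,
 *		.fill_dst	 = xfrm4_fill_dst,
 *		.blackhole_route = ipv4_blackhole_route,
 *	};
 *
 *	static int __init xfrm4_policy_init(void)
 *	{
 *		return xfrm_policy_register_afinfo(&xfrm4_policy_afinfo,
 *						   AF_INET);
 *	}
 *
 * Registration fails with -EEXIST if the family already has an afinfo and
 * with -EAFNOSUPPORT if the family index is out of range.
 */
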
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
{
	struct dst_ops *dst_ops = afinfo->dst_ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
		if (xfrm_policy_afinfo[i] != afinfo)
			continue;
		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
		break;
	}

	synchronize_rcu();

	dst_ops->kmem_cachep = NULL;
	dst_ops->check = NULL;
	dst_ops->negative_advice = NULL;
	dst_ops->link_failure = NULL;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

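/* Sketch (assumed symmetry with the registration example above): the
 * family's exit path unregisters the same afinfo; the synchronize_rcu()
 * above guarantees no RCU reader still sees it afterwards.
 *
 *	static void __exit xfrm4_policy_fini(void)
 *	{
 *		xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
 *	}
 */
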
#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif

static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
		htab->dbits4 = 32;
		htab->sbits4 = 32;
		htab->dbits6 = 128;
		htab->sbits6 = 128;
	}
	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	if (net_eq(net, &init_net))
		xfrm_dev_init();
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	int i;

	xfrm_pcpu_work = kmalloc_array(NR_CPUS, sizeof(*xfrm_pcpu_work),
				       GFP_KERNEL);
	BUG_ON(!xfrm_pcpu_work);

	for (i = 0; i < NR_CPUS; i++)
		INIT_WORK(&xfrm_pcpu_work[i], xfrm_pcpu_work_fn);

	register_pernet_subsys(&xfrm_net_ops);
	seqcount_init(&xfrm_policy_hash_generation);
	xfrm_input_init();
}

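/* Note (hedged): xfrm_init() has no module_init() hook of its own; as far
 * as the author can tell it is invoked once at boot from the IPv4 routing
 * setup, ip_rt_init() in net/ipv4/route.c, when CONFIG_XFRM is enabled.
 */
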
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

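/* Usage sketch (an assumption based on the configuration paths in
 * net/xfrm/xfrm_user.c and net/key/af_key.c): callers log one record per
 * SPD change, passing result = 1 on success and 0 on failure, e.g.
 *
 *	err = xfrm_policy_insert(p->dir, xp, excl);
 *	xfrm_audit_policy_add(xp, err ? 0 : 1, true);
 */
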
#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		/* An inexact policy only beats the exact-match hit above
		 * if it has a strictly lower (i.e. better) priority value.
		 */
		if ((pol->priority >= priority) && ret)
			break;

		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			 * any IP addresses, hence we just compare mode and
			 * protocol
			 */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);

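/* Usage sketch (an assumption, abridged from how key managers such as
 * net/key/af_key.c build MIGRATE requests): the caller fills one
 * xfrm_migrate entry per template to be moved and hands the whole array
 * to xfrm_migrate() in a single call, e.g.
 *
 *	struct xfrm_migrate mi[XFRM_MAX_DEPTH];
 *	int n = 0;
 *
 *	mi[n].old_daddr  = old_daddr;	mi[n].new_daddr  = new_daddr;
 *	mi[n].old_saddr  = old_saddr;	mi[n].new_saddr  = new_saddr;
 *	mi[n].old_family = AF_INET;	mi[n].new_family = AF_INET;
 *	mi[n].proto      = IPPROTO_ESP;	mi[n].mode = XFRM_MODE_TUNNEL;
 *	mi[n].reqid      = 0;
 *	n++;
 *
 *	err = xfrm_migrate(&sel, XFRM_POLICY_OUT, XFRM_POLICY_TYPE_MAIN,
 *			   mi, n, NULL, net, NULL);
 *
 * On success all five stages above have completed; on failure the old
 * states are kept and any half-created new states are deleted again.
 */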