1 /*
2  * xfrm_state.c
3  *
4  * Changes:
5  *      Mitsuru KANDA @USAGI
6  *      Kazunori MIYAZAWA @USAGI
7  *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8  *              IPv6 support
9  *      YOSHIFUJI Hideaki @USAGI
10  *              Split up af-specific functions
11  *      Derek Atkins <derek@ihtfp.com>
12  *              Add UDP Encapsulation
13  *
14  */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <linux/uaccess.h>
24 #include <linux/ktime.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/kernel.h>
28
29 #include "xfrm_hash.h"
30
31 #define xfrm_state_deref_prot(table, net) \
32         rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
33
34 static void xfrm_state_gc_task(struct work_struct *work);
35
36 /* Each xfrm_state may be linked to two tables:
37
38    1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
39    2. Hash table by (daddr,family,reqid) to find what SAs exist for given
40       destination/tunnel endpoint. (output)
41  */
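   /* Note: alongside these two, the code below also maintains a by-source
      hash (state_bysrc) and a list of every state (net->xfrm.state_all),
      used e.g. by xfrm_state_lookup_byspi() further down. */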
42
43 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
44 static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
45 static struct kmem_cache *xfrm_state_cache __ro_after_init;
46
47 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
48 static HLIST_HEAD(xfrm_state_gc_list);
49
50 static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
51 {
52         return refcount_inc_not_zero(&x->refcnt);
53 }
54
55 static inline unsigned int xfrm_dst_hash(struct net *net,
56                                          const xfrm_address_t *daddr,
57                                          const xfrm_address_t *saddr,
58                                          u32 reqid,
59                                          unsigned short family)
60 {
61         return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
62 }
63
64 static inline unsigned int xfrm_src_hash(struct net *net,
65                                          const xfrm_address_t *daddr,
66                                          const xfrm_address_t *saddr,
67                                          unsigned short family)
68 {
69         return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
70 }
71
72 static inline unsigned int
73 xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
74               __be32 spi, u8 proto, unsigned short family)
75 {
76         return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
77 }
78
79 static void xfrm_hash_transfer(struct hlist_head *list,
80                                struct hlist_head *ndsttable,
81                                struct hlist_head *nsrctable,
82                                struct hlist_head *nspitable,
83                                unsigned int nhashmask)
84 {
85         struct hlist_node *tmp;
86         struct xfrm_state *x;
87
88         hlist_for_each_entry_safe(x, tmp, list, bydst) {
89                 unsigned int h;
90
91                 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
92                                     x->props.reqid, x->props.family,
93                                     nhashmask);
94                 hlist_add_head_rcu(&x->bydst, ndsttable + h);
95
96                 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
97                                     x->props.family,
98                                     nhashmask);
99                 hlist_add_head_rcu(&x->bysrc, nsrctable + h);
100
101                 if (x->id.spi) {
102                         h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
103                                             x->id.proto, x->props.family,
104                                             nhashmask);
105                         hlist_add_head_rcu(&x->byspi, nspitable + h);
106                 }
107         }
108 }
109
110 static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
111 {
112         return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
113 }
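    /* Each resize doubles the table: with state_hmask == 7 (8 buckets),
     * xfrm_hash_new_size() asks for room for 16 hlist_head entries.
     */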
114
115 static void xfrm_hash_resize(struct work_struct *work)
116 {
117         struct net *net = container_of(work, struct net, xfrm.state_hash_work);
118         struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
119         unsigned long nsize, osize;
120         unsigned int nhashmask, ohashmask;
121         int i;
122
123         nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
124         ndst = xfrm_hash_alloc(nsize);
125         if (!ndst)
126                 return;
127         nsrc = xfrm_hash_alloc(nsize);
128         if (!nsrc) {
129                 xfrm_hash_free(ndst, nsize);
130                 return;
131         }
132         nspi = xfrm_hash_alloc(nsize);
133         if (!nspi) {
134                 xfrm_hash_free(ndst, nsize);
135                 xfrm_hash_free(nsrc, nsize);
136                 return;
137         }
138
139         spin_lock_bh(&net->xfrm.xfrm_state_lock);
140         write_seqcount_begin(&xfrm_state_hash_generation);
141
142         nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
143         odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
144         for (i = net->xfrm.state_hmask; i >= 0; i--)
145                 xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);
146
147         osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
148         ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
149         ohashmask = net->xfrm.state_hmask;
150
151         rcu_assign_pointer(net->xfrm.state_bydst, ndst);
152         rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
153         rcu_assign_pointer(net->xfrm.state_byspi, nspi);
154         net->xfrm.state_hmask = nhashmask;
155
156         write_seqcount_end(&xfrm_state_hash_generation);
157         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
158
159         osize = (ohashmask + 1) * sizeof(struct hlist_head);
160
161         synchronize_rcu();
162
163         xfrm_hash_free(odst, osize);
164         xfrm_hash_free(osrc, osize);
165         xfrm_hash_free(ospi, osize);
166 }
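    /* Resize note: the old bucket arrays are freed only after the
     * synchronize_rcu() above, so lockless readers still walking them
     * under rcu_read_lock() get to finish before the memory is released.
     */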
167
168 static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
169 static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
170
171 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
172
173 int __xfrm_state_delete(struct xfrm_state *x);
174
175 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
176 bool km_is_alive(const struct km_event *c);
177 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
178
179 static DEFINE_SPINLOCK(xfrm_type_lock);
180 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
181 {
182         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
183         const struct xfrm_type **typemap;
184         int err = 0;
185
186         if (unlikely(afinfo == NULL))
187                 return -EAFNOSUPPORT;
188         typemap = afinfo->type_map;
189         spin_lock_bh(&xfrm_type_lock);
190
191         if (likely(typemap[type->proto] == NULL))
192                 typemap[type->proto] = type;
193         else
194                 err = -EEXIST;
195         spin_unlock_bh(&xfrm_type_lock);
196         rcu_read_unlock();
197         return err;
198 }
199 EXPORT_SYMBOL(xfrm_register_type);
200
201 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
202 {
203         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
204         const struct xfrm_type **typemap;
205         int err = 0;
206
207         if (unlikely(afinfo == NULL))
208                 return -EAFNOSUPPORT;
209         typemap = afinfo->type_map;
210         spin_lock_bh(&xfrm_type_lock);
211
212         if (unlikely(typemap[type->proto] != type))
213                 err = -ENOENT;
214         else
215                 typemap[type->proto] = NULL;
216         spin_unlock_bh(&xfrm_type_lock);
217         rcu_read_unlock();
218         return err;
219 }
220 EXPORT_SYMBOL(xfrm_unregister_type);
221
222 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
223 {
224         struct xfrm_state_afinfo *afinfo;
225         const struct xfrm_type **typemap;
226         const struct xfrm_type *type;
227         int modload_attempted = 0;
228
229 retry:
230         afinfo = xfrm_state_get_afinfo(family);
231         if (unlikely(afinfo == NULL))
232                 return NULL;
233         typemap = afinfo->type_map;
234
235         type = READ_ONCE(typemap[proto]);
236         if (unlikely(type && !try_module_get(type->owner)))
237                 type = NULL;
238
239         rcu_read_unlock();
240
241         if (!type && !modload_attempted) {
242                 request_module("xfrm-type-%d-%d", family, proto);
243                 modload_attempted = 1;
244                 goto retry;
245         }
246
247         return type;
248 }
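    /* The autoload alias is "xfrm-type-<family>-<proto>"; e.g. looking up
     * ESP (protocol 50) for AF_INET (family 2) requests "xfrm-type-2-50".
     */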
249
250 static void xfrm_put_type(const struct xfrm_type *type)
251 {
252         module_put(type->owner);
253 }
254
255 static DEFINE_SPINLOCK(xfrm_type_offload_lock);
256 int xfrm_register_type_offload(const struct xfrm_type_offload *type,
257                                unsigned short family)
258 {
259         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
260         const struct xfrm_type_offload **typemap;
261         int err = 0;
262
263         if (unlikely(afinfo == NULL))
264                 return -EAFNOSUPPORT;
265         typemap = afinfo->type_offload_map;
266         spin_lock_bh(&xfrm_type_offload_lock);
267
268         if (likely(typemap[type->proto] == NULL))
269                 typemap[type->proto] = type;
270         else
271                 err = -EEXIST;
272         spin_unlock_bh(&xfrm_type_offload_lock);
273         rcu_read_unlock();
274         return err;
275 }
276 EXPORT_SYMBOL(xfrm_register_type_offload);
277
278 int xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
279                                  unsigned short family)
280 {
281         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
282         const struct xfrm_type_offload **typemap;
283         int err = 0;
284
285         if (unlikely(afinfo == NULL))
286                 return -EAFNOSUPPORT;
287         typemap = afinfo->type_offload_map;
288         spin_lock_bh(&xfrm_type_offload_lock);
289
290         if (unlikely(typemap[type->proto] != type))
291                 err = -ENOENT;
292         else
293                 typemap[type->proto] = NULL;
294         spin_unlock_bh(&xfrm_type_offload_lock);
295         rcu_read_unlock();
296         return err;
297 }
298 EXPORT_SYMBOL(xfrm_unregister_type_offload);
299
300 static const struct xfrm_type_offload *
301 xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
302 {
303         struct xfrm_state_afinfo *afinfo;
304         const struct xfrm_type_offload **typemap;
305         const struct xfrm_type_offload *type;
306
307 retry:
308         afinfo = xfrm_state_get_afinfo(family);
309         if (unlikely(afinfo == NULL))
310                 return NULL;
311         typemap = afinfo->type_offload_map;
312
313         type = typemap[proto];
314         if ((type && !try_module_get(type->owner)))
315                 type = NULL;
316
317         rcu_read_unlock();
318
319         if (!type && try_load) {
320                 request_module("xfrm-offload-%d-%d", family, proto);
321                 try_load = false;
322                 goto retry;
323         }
324
325         return type;
326 }
327
328 static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
329 {
330         module_put(type->owner);
331 }
332
333 static DEFINE_SPINLOCK(xfrm_mode_lock);
334 int xfrm_register_mode(struct xfrm_mode *mode)
335 {
336         struct xfrm_state_afinfo *afinfo;
337         struct xfrm_mode **modemap;
338         int err;
339
340         if (unlikely(mode->encap >= XFRM_MODE_MAX))
341                 return -EINVAL;
342
343         afinfo = xfrm_state_get_afinfo(mode->family);
344         if (unlikely(afinfo == NULL))
345                 return -EAFNOSUPPORT;
346
347         err = -EEXIST;
348         modemap = afinfo->mode_map;
349         spin_lock_bh(&xfrm_mode_lock);
350         if (modemap[mode->encap])
351                 goto out;
352
353         err = -ENOENT;
354         if (!try_module_get(afinfo->owner))
355                 goto out;
356
357         mode->afinfo = afinfo;
358         modemap[mode->encap] = mode;
359         err = 0;
360
361 out:
362         spin_unlock_bh(&xfrm_mode_lock);
363         rcu_read_unlock();
364         return err;
365 }
366 EXPORT_SYMBOL(xfrm_register_mode);
367
368 void xfrm_unregister_mode(struct xfrm_mode *mode)
369 {
370         struct xfrm_state_afinfo *afinfo;
371         struct xfrm_mode **modemap;
372
373         afinfo = xfrm_state_get_afinfo(mode->family);
374         if (WARN_ON_ONCE(!afinfo))
375                 return;
376
377         modemap = afinfo->mode_map;
378         spin_lock_bh(&xfrm_mode_lock);
379         if (likely(modemap[mode->encap] == mode)) {
380                 modemap[mode->encap] = NULL;
381                 module_put(mode->afinfo->owner);
382         }
383
384         spin_unlock_bh(&xfrm_mode_lock);
385         rcu_read_unlock();
386 }
387 EXPORT_SYMBOL(xfrm_unregister_mode);
388
389 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
390 {
391         struct xfrm_state_afinfo *afinfo;
392         struct xfrm_mode *mode;
393         int modload_attempted = 0;
394
395         if (unlikely(encap >= XFRM_MODE_MAX))
396                 return NULL;
397
398 retry:
399         afinfo = xfrm_state_get_afinfo(family);
400         if (unlikely(afinfo == NULL))
401                 return NULL;
402
403         mode = READ_ONCE(afinfo->mode_map[encap]);
404         if (unlikely(mode && !try_module_get(mode->owner)))
405                 mode = NULL;
406
407         rcu_read_unlock();
408         if (!mode && !modload_attempted) {
409                 request_module("xfrm-mode-%d-%d", family, encap);
410                 modload_attempted = 1;
411                 goto retry;
412         }
413
414         return mode;
415 }
416
417 static void xfrm_put_mode(struct xfrm_mode *mode)
418 {
419         module_put(mode->owner);
420 }
421
422 void xfrm_state_free(struct xfrm_state *x)
423 {
424         kmem_cache_free(xfrm_state_cache, x);
425 }
426 EXPORT_SYMBOL(xfrm_state_free);
427
428 static void ___xfrm_state_destroy(struct xfrm_state *x)
429 {
430         tasklet_hrtimer_cancel(&x->mtimer);
431         del_timer_sync(&x->rtimer);
432         kfree(x->aead);
433         kfree(x->aalg);
434         kfree(x->ealg);
435         kfree(x->calg);
436         kfree(x->encap);
437         kfree(x->coaddr);
438         kfree(x->replay_esn);
439         kfree(x->preplay_esn);
440         if (x->inner_mode)
441                 xfrm_put_mode(x->inner_mode);
442         if (x->inner_mode_iaf)
443                 xfrm_put_mode(x->inner_mode_iaf);
444         if (x->outer_mode)
445                 xfrm_put_mode(x->outer_mode);
446         if (x->type_offload)
447                 xfrm_put_type_offload(x->type_offload);
448         if (x->type) {
449                 x->type->destructor(x);
450                 xfrm_put_type(x->type);
451         }
452         xfrm_dev_state_free(x);
453         security_xfrm_state_free(x);
454         xfrm_state_free(x);
455 }
456
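    /* Deferred destruction: __xfrm_state_destroy() queues dead states on
     * xfrm_state_gc_list, and this worker waits out an RCU grace period
     * before running ___xfrm_state_destroy() on each queued entry.
     */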
457 static void xfrm_state_gc_task(struct work_struct *work)
458 {
459         struct xfrm_state *x;
460         struct hlist_node *tmp;
461         struct hlist_head gc_list;
462
463         spin_lock_bh(&xfrm_state_gc_lock);
464         hlist_move_list(&xfrm_state_gc_list, &gc_list);
465         spin_unlock_bh(&xfrm_state_gc_lock);
466
467         synchronize_rcu();
468
469         hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
470                 ___xfrm_state_destroy(x);
471 }
472
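    /* Lifetime timer: rearms itself for the nearest pending soft/hard
     * deadline.  A soft expiry only warns the key manager
     * (km_state_expired(x, 0, 0)); a hard expiry deletes the state and
     * reports it with hard == 1.
     */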
473 static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
474 {
475         struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
476         struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
477         time64_t now = ktime_get_real_seconds();
478         time64_t next = TIME64_MAX;
479         int warn = 0;
480         int err = 0;
481
482         spin_lock(&x->lock);
483         if (x->km.state == XFRM_STATE_DEAD)
484                 goto out;
485         if (x->km.state == XFRM_STATE_EXPIRED)
486                 goto expired;
487         if (x->lft.hard_add_expires_seconds) {
488                 long tmo = x->lft.hard_add_expires_seconds +
489                         x->curlft.add_time - now;
490                 if (tmo <= 0) {
491                         if (x->xflags & XFRM_SOFT_EXPIRE) {
492                                 /* entering hard expiry without a prior soft expiry?!
493                                  * setting a new date could trigger this.
494                                  * workaround: fix up x->curlft.add_time as below:
495                                  */
496                                 x->curlft.add_time = now - x->saved_tmo - 1;
497                                 tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
498                         } else
499                                 goto expired;
500                 }
501                 if (tmo < next)
502                         next = tmo;
503         }
504         if (x->lft.hard_use_expires_seconds) {
505                 long tmo = x->lft.hard_use_expires_seconds +
506                         (x->curlft.use_time ? : now) - now;
507                 if (tmo <= 0)
508                         goto expired;
509                 if (tmo < next)
510                         next = tmo;
511         }
512         if (x->km.dying)
513                 goto resched;
514         if (x->lft.soft_add_expires_seconds) {
515                 long tmo = x->lft.soft_add_expires_seconds +
516                         x->curlft.add_time - now;
517                 if (tmo <= 0) {
518                         warn = 1;
519                         x->xflags &= ~XFRM_SOFT_EXPIRE;
520                 } else if (tmo < next) {
521                         next = tmo;
522                         x->xflags |= XFRM_SOFT_EXPIRE;
523                         x->saved_tmo = tmo;
524                 }
525         }
526         if (x->lft.soft_use_expires_seconds) {
527                 long tmo = x->lft.soft_use_expires_seconds +
528                         (x->curlft.use_time ? : now) - now;
529                 if (tmo <= 0)
530                         warn = 1;
531                 else if (tmo < next)
532                         next = tmo;
533         }
534
535         x->km.dying = warn;
536         if (warn)
537                 km_state_expired(x, 0, 0);
538 resched:
539         if (next != TIME64_MAX) {
540                 tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
541         }
542
543         goto out;
544
545 expired:
546         if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
547                 x->km.state = XFRM_STATE_EXPIRED;
548
549         err = __xfrm_state_delete(x);
550         if (!err)
551                 km_state_expired(x, 1, 0);
552
553         xfrm_audit_state_delete(x, err ? 0 : 1, true);
554
555 out:
556         spin_unlock(&x->lock);
557         return HRTIMER_NORESTART;
558 }
559
560 static void xfrm_replay_timer_handler(struct timer_list *t);
561
562 struct xfrm_state *xfrm_state_alloc(struct net *net)
563 {
564         struct xfrm_state *x;
565
566         x = kmem_cache_alloc(xfrm_state_cache, GFP_ATOMIC | __GFP_ZERO);
567
568         if (x) {
569                 write_pnet(&x->xs_net, net);
570                 refcount_set(&x->refcnt, 1);
571                 atomic_set(&x->tunnel_users, 0);
572                 INIT_LIST_HEAD(&x->km.all);
573                 INIT_HLIST_NODE(&x->bydst);
574                 INIT_HLIST_NODE(&x->bysrc);
575                 INIT_HLIST_NODE(&x->byspi);
576                 tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
577                                         CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
578                 timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
579                 x->curlft.add_time = ktime_get_real_seconds();
580                 x->lft.soft_byte_limit = XFRM_INF;
581                 x->lft.soft_packet_limit = XFRM_INF;
582                 x->lft.hard_byte_limit = XFRM_INF;
583                 x->lft.hard_packet_limit = XFRM_INF;
584                 x->replay_maxage = 0;
585                 x->replay_maxdiff = 0;
586                 x->inner_mode = NULL;
587                 x->inner_mode_iaf = NULL;
588                 spin_lock_init(&x->lock);
589         }
590         return x;
591 }
592 EXPORT_SYMBOL(xfrm_state_alloc);
593
594 void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
595 {
596         WARN_ON(x->km.state != XFRM_STATE_DEAD);
597
598         if (sync) {
599                 synchronize_rcu();
600                 ___xfrm_state_destroy(x);
601         } else {
602                 spin_lock_bh(&xfrm_state_gc_lock);
603                 hlist_add_head(&x->gclist, &xfrm_state_gc_list);
604                 spin_unlock_bh(&xfrm_state_gc_lock);
605                 schedule_work(&xfrm_state_gc_work);
606         }
607 }
608 EXPORT_SYMBOL(__xfrm_state_destroy);
609
610 int __xfrm_state_delete(struct xfrm_state *x)
611 {
612         struct net *net = xs_net(x);
613         int err = -ESRCH;
614
615         if (x->km.state != XFRM_STATE_DEAD) {
616                 x->km.state = XFRM_STATE_DEAD;
617                 spin_lock(&net->xfrm.xfrm_state_lock);
618                 list_del(&x->km.all);
619                 hlist_del_rcu(&x->bydst);
620                 hlist_del_rcu(&x->bysrc);
621                 if (x->id.spi)
622                         hlist_del_rcu(&x->byspi);
623                 net->xfrm.state_num--;
624                 spin_unlock(&net->xfrm.xfrm_state_lock);
625
626                 xfrm_dev_state_delete(x);
627
628                 /* All xfrm_state objects are created by xfrm_state_alloc.
629                  * The xfrm_state_alloc call gives a reference, and that
630                  * is what we are dropping here.
631                  */
632                 xfrm_state_put(x);
633                 err = 0;
634         }
635
636         return err;
637 }
638 EXPORT_SYMBOL(__xfrm_state_delete);
639
640 int xfrm_state_delete(struct xfrm_state *x)
641 {
642         int err;
643
644         spin_lock_bh(&x->lock);
645         err = __xfrm_state_delete(x);
646         spin_unlock_bh(&x->lock);
647
648         return err;
649 }
650 EXPORT_SYMBOL(xfrm_state_delete);
651
652 #ifdef CONFIG_SECURITY_NETWORK_XFRM
653 static inline int
654 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
655 {
656         int i, err = 0;
657
658         for (i = 0; i <= net->xfrm.state_hmask; i++) {
659                 struct xfrm_state *x;
660
661                 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
662                         if (xfrm_id_proto_match(x->id.proto, proto) &&
663                            (err = security_xfrm_state_delete(x)) != 0) {
664                                 xfrm_audit_state_delete(x, 0, task_valid);
665                                 return err;
666                         }
667                 }
668         }
669
670         return err;
671 }
672
673 static inline int
674 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
675 {
676         int i, err = 0;
677
678         for (i = 0; i <= net->xfrm.state_hmask; i++) {
679                 struct xfrm_state *x;
680                 struct xfrm_state_offload *xso;
681
682                 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
683                         xso = &x->xso;
684
685                         if (xso->dev == dev &&
686                            (err = security_xfrm_state_delete(x)) != 0) {
687                                 xfrm_audit_state_delete(x, 0, task_valid);
688                                 return err;
689                         }
690                 }
691         }
692
693         return err;
694 }
695 #else
696 static inline int
697 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
698 {
699         return 0;
700 }
701
702 static inline int
703 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
704 {
705         return 0;
706 }
707 #endif
708
709 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
710 {
711         int i, err = 0, cnt = 0;
712
713         spin_lock_bh(&net->xfrm.xfrm_state_lock);
714         err = xfrm_state_flush_secctx_check(net, proto, task_valid);
715         if (err)
716                 goto out;
717
718         err = -ESRCH;
719         for (i = 0; i <= net->xfrm.state_hmask; i++) {
720                 struct xfrm_state *x;
721 restart:
722                 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
723                         if (!xfrm_state_kern(x) &&
724                             xfrm_id_proto_match(x->id.proto, proto)) {
725                                 xfrm_state_hold(x);
726                                 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
727
728                                 err = xfrm_state_delete(x);
729                                 xfrm_audit_state_delete(x, err ? 0 : 1,
730                                                         task_valid);
731                                 if (sync)
732                                         xfrm_state_put_sync(x);
733                                 else
734                                         xfrm_state_put(x);
735                                 if (!err)
736                                         cnt++;
737
738                                 spin_lock_bh(&net->xfrm.xfrm_state_lock);
739                                 goto restart;
740                         }
741                 }
742         }
743 out:
744         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
745         if (cnt)
746                 err = 0;
747
748         return err;
749 }
750 EXPORT_SYMBOL(xfrm_state_flush);
751
752 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
753 {
754         int i, err = 0, cnt = 0;
755
756         spin_lock_bh(&net->xfrm.xfrm_state_lock);
757         err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
758         if (err)
759                 goto out;
760
761         err = -ESRCH;
762         for (i = 0; i <= net->xfrm.state_hmask; i++) {
763                 struct xfrm_state *x;
764                 struct xfrm_state_offload *xso;
765 restart:
766                 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
767                         xso = &x->xso;
768
769                         if (!xfrm_state_kern(x) && xso->dev == dev) {
770                                 xfrm_state_hold(x);
771                                 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
772
773                                 err = xfrm_state_delete(x);
774                                 xfrm_audit_state_delete(x, err ? 0 : 1,
775                                                         task_valid);
776                                 xfrm_state_put(x);
777                                 if (!err)
778                                         cnt++;
779
780                                 spin_lock_bh(&net->xfrm.xfrm_state_lock);
781                                 goto restart;
782                         }
783                 }
784         }
785         if (cnt)
786                 err = 0;
787
788 out:
789         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
790         return err;
791 }
792 EXPORT_SYMBOL(xfrm_dev_state_flush);
793
794 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
795 {
796         spin_lock_bh(&net->xfrm.xfrm_state_lock);
797         si->sadcnt = net->xfrm.state_num;
798         si->sadhcnt = net->xfrm.state_hmask + 1;
799         si->sadhmcnt = xfrm_state_hashmax;
800         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
801 }
802 EXPORT_SYMBOL(xfrm_sad_getinfo);
803
804 static void
805 xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
806                     const struct xfrm_tmpl *tmpl,
807                     const xfrm_address_t *daddr, const xfrm_address_t *saddr,
808                     unsigned short family)
809 {
810         struct xfrm_state_afinfo *afinfo = xfrm_state_afinfo_get_rcu(family);
811
812         if (!afinfo)
813                 return;
814
815         afinfo->init_tempsel(&x->sel, fl);
816
817         if (family != tmpl->encap_family) {
818                 afinfo = xfrm_state_afinfo_get_rcu(tmpl->encap_family);
819                 if (!afinfo)
820                         return;
821         }
822         afinfo->init_temprop(x, tmpl, daddr, saddr);
823 }
824
825 static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
826                                               const xfrm_address_t *daddr,
827                                               __be32 spi, u8 proto,
828                                               unsigned short family)
829 {
830         unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
831         struct xfrm_state *x;
832
833         hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
834                 if (x->props.family != family ||
835                     x->id.spi       != spi ||
836                     x->id.proto     != proto ||
837                     !xfrm_addr_equal(&x->id.daddr, daddr, family))
838                         continue;
839
840                 if ((mark & x->mark.m) != x->mark.v)
841                         continue;
842                 if (!xfrm_state_hold_rcu(x))
843                         continue;
844                 return x;
845         }
846
847         return NULL;
848 }
849
850 static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
851                                                      const xfrm_address_t *daddr,
852                                                      const xfrm_address_t *saddr,
853                                                      u8 proto, unsigned short family)
854 {
855         unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
856         struct xfrm_state *x;
857
858         hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
859                 if (x->props.family != family ||
860                     x->id.proto     != proto ||
861                     !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
862                     !xfrm_addr_equal(&x->props.saddr, saddr, family))
863                         continue;
864
865                 if ((mark & x->mark.m) != x->mark.v)
866                         continue;
867                 if (!xfrm_state_hold_rcu(x))
868                         continue;
869                 return x;
870         }
871
872         return NULL;
873 }
874
875 static inline struct xfrm_state *
876 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
877 {
878         struct net *net = xs_net(x);
879         u32 mark = x->mark.v & x->mark.m;
880
881         if (use_spi)
882                 return __xfrm_state_lookup(net, mark, &x->id.daddr,
883                                            x->id.spi, x->id.proto, family);
884         else
885                 return __xfrm_state_lookup_byaddr(net, mark,
886                                                   &x->id.daddr,
887                                                   &x->props.saddr,
888                                                   x->id.proto, family);
889 }
890
891 static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
892 {
893         if (have_hash_collision &&
894             (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
895             net->xfrm.state_num > net->xfrm.state_hmask)
896                 schedule_work(&net->xfrm.state_hash_work);
897 }
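    /* Grow once a chain collision is seen and state_num exceeds the
     * current hash mask (roughly one state per bucket), capped by
     * xfrm_state_hashmax.
     */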
898
899 static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
900                                const struct flowi *fl, unsigned short family,
901                                struct xfrm_state **best, int *acq_in_progress,
902                                int *error)
903 {
904         /* Resolution logic:
905          * 1. There is a valid state with matching selector. Done.
906          * 2. Valid state with inappropriate selector. Skip.
907          *
908          * Entering area of "sysdeps".
909          *
910          * 3. If the state is not valid, its selector is temporary and it
911          *    selects only the session which triggered the previous
912          *    resolution.  The key manager will do something to install a
913          *    state with a proper selector.
914          */
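            /* Among valid candidates, prefer one that is not dying; with
             * equal dying status prefer the most recently added
             * (largest add_time).
             */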
915         if (x->km.state == XFRM_STATE_VALID) {
916                 if ((x->sel.family &&
917                      !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
918                     !security_xfrm_state_pol_flow_match(x, pol, fl))
919                         return;
920
921                 if (!*best ||
922                     (*best)->km.dying > x->km.dying ||
923                     ((*best)->km.dying == x->km.dying &&
924                      (*best)->curlft.add_time < x->curlft.add_time))
925                         *best = x;
926         } else if (x->km.state == XFRM_STATE_ACQ) {
927                 *acq_in_progress = 1;
928         } else if (x->km.state == XFRM_STATE_ERROR ||
929                    x->km.state == XFRM_STATE_EXPIRED) {
930                 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
931                     security_xfrm_state_pol_flow_match(x, pol, fl))
932                         *error = -ESRCH;
933         }
934 }
935
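    /* Output-side lookup: search the bydst hash for a state matching the
     * template, retry with a wildcard source address, and if nothing is
     * found (and a key manager is listening) create an XFRM_STATE_ACQ
     * placeholder and call km_query() to negotiate a real SA.
     */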
936 struct xfrm_state *
937 xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
938                 const struct flowi *fl, struct xfrm_tmpl *tmpl,
939                 struct xfrm_policy *pol, int *err,
940                 unsigned short family, u32 if_id)
941 {
942         static xfrm_address_t saddr_wildcard = { };
943         struct net *net = xp_net(pol);
944         unsigned int h, h_wildcard;
945         struct xfrm_state *x, *x0, *to_put;
946         int acquire_in_progress = 0;
947         int error = 0;
948         struct xfrm_state *best = NULL;
949         u32 mark = pol->mark.v & pol->mark.m;
950         unsigned short encap_family = tmpl->encap_family;
951         unsigned int sequence;
952         struct km_event c;
953
954         to_put = NULL;
955
956         sequence = read_seqcount_begin(&xfrm_state_hash_generation);
957
958         rcu_read_lock();
959         h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
960         hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
961                 if (x->props.family == encap_family &&
962                     x->props.reqid == tmpl->reqid &&
963                     (mark & x->mark.m) == x->mark.v &&
964                     x->if_id == if_id &&
965                     !(x->props.flags & XFRM_STATE_WILDRECV) &&
966                     xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
967                     tmpl->mode == x->props.mode &&
968                     tmpl->id.proto == x->id.proto &&
969                     (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
970                         xfrm_state_look_at(pol, x, fl, encap_family,
971                                            &best, &acquire_in_progress, &error);
972         }
973         if (best || acquire_in_progress)
974                 goto found;
975
976         h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
977         hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
978                 if (x->props.family == encap_family &&
979                     x->props.reqid == tmpl->reqid &&
980                     (mark & x->mark.m) == x->mark.v &&
981                     x->if_id == if_id &&
982                     !(x->props.flags & XFRM_STATE_WILDRECV) &&
983                     xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
984                     tmpl->mode == x->props.mode &&
985                     tmpl->id.proto == x->id.proto &&
986                     (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
987                         xfrm_state_look_at(pol, x, fl, encap_family,
988                                            &best, &acquire_in_progress, &error);
989         }
990
991 found:
992         x = best;
993         if (!x && !error && !acquire_in_progress) {
994                 if (tmpl->id.spi &&
995                     (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
996                                               tmpl->id.proto, encap_family)) != NULL) {
997                         to_put = x0;
998                         error = -EEXIST;
999                         goto out;
1000                 }
1001
1002                 c.net = net;
1003                 /* If the KMs have no listeners (yet...), avoid allocating an SA
1004                  * for each and every packet - garbage collection might not
1005                  * handle the flood.
1006                  */
1007                 if (!km_is_alive(&c)) {
1008                         error = -ESRCH;
1009                         goto out;
1010                 }
1011
1012                 x = xfrm_state_alloc(net);
1013                 if (x == NULL) {
1014                         error = -ENOMEM;
1015                         goto out;
1016                 }
1017                 /* Initialize a temporary state matching only
1018                  * the current session. */
1019                 xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
1020                 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
1021                 x->if_id = if_id;
1022
1023                 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
1024                 if (error) {
1025                         x->km.state = XFRM_STATE_DEAD;
1026                         to_put = x;
1027                         x = NULL;
1028                         goto out;
1029                 }
1030
1031                 if (km_query(x, tmpl, pol) == 0) {
1032                         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1033                         x->km.state = XFRM_STATE_ACQ;
1034                         list_add(&x->km.all, &net->xfrm.state_all);
1035                         hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1036                         h = xfrm_src_hash(net, daddr, saddr, encap_family);
1037                         hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1038                         if (x->id.spi) {
1039                                 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
1040                                 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1041                         }
1042                         x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1043                         tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
1044                         net->xfrm.state_num++;
1045                         xfrm_hash_grow_check(net, x->bydst.next != NULL);
1046                         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1047                 } else {
1048                         x->km.state = XFRM_STATE_DEAD;
1049                         to_put = x;
1050                         x = NULL;
1051                         error = -ESRCH;
1052                 }
1053         }
1054 out:
1055         if (x) {
1056                 if (!xfrm_state_hold_rcu(x)) {
1057                         *err = -EAGAIN;
1058                         x = NULL;
1059                 }
1060         } else {
1061                 *err = acquire_in_progress ? -EAGAIN : error;
1062         }
1063         rcu_read_unlock();
1064         if (to_put)
1065                 xfrm_state_put(to_put);
1066
1067         if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
1068                 *err = -EAGAIN;
1069                 if (x) {
1070                         xfrm_state_put(x);
1071                         x = NULL;
1072                 }
1073         }
1074
1075         return x;
1076 }
1077
1078 struct xfrm_state *
1079 xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1080                     xfrm_address_t *daddr, xfrm_address_t *saddr,
1081                     unsigned short family, u8 mode, u8 proto, u32 reqid)
1082 {
1083         unsigned int h;
1084         struct xfrm_state *rx = NULL, *x = NULL;
1085
1086         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1087         h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1088         hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1089                 if (x->props.family == family &&
1090                     x->props.reqid == reqid &&
1091                     (mark & x->mark.m) == x->mark.v &&
1092                     x->if_id == if_id &&
1093                     !(x->props.flags & XFRM_STATE_WILDRECV) &&
1094                     xfrm_state_addr_check(x, daddr, saddr, family) &&
1095                     mode == x->props.mode &&
1096                     proto == x->id.proto &&
1097                     x->km.state == XFRM_STATE_VALID) {
1098                         rx = x;
1099                         break;
1100                 }
1101         }
1102
1103         if (rx)
1104                 xfrm_state_hold(rx);
1105         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1106
1107
1108         return rx;
1109 }
1110 EXPORT_SYMBOL(xfrm_stateonly_find);
1111
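     /* Unlike __xfrm_state_lookup(), this helper walks the whole
      * state_all list rather than the byspi hash, and matches on
      * (family, spi) only.
      */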
1112 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1113                                               unsigned short family)
1114 {
1115         struct xfrm_state *x;
1116         struct xfrm_state_walk *w;
1117
1118         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1119         list_for_each_entry(w, &net->xfrm.state_all, all) {
1120                 x = container_of(w, struct xfrm_state, km);
1121                 if (x->props.family != family ||
1122                         x->id.spi != spi)
1123                         continue;
1124
1125                 xfrm_state_hold(x);
1126                 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1127                 return x;
1128         }
1129         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1130         return NULL;
1131 }
1132 EXPORT_SYMBOL(xfrm_state_lookup_byspi);
1133
1134 static void __xfrm_state_insert(struct xfrm_state *x)
1135 {
1136         struct net *net = xs_net(x);
1137         unsigned int h;
1138
1139         list_add(&x->km.all, &net->xfrm.state_all);
1140
1141         h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
1142                           x->props.reqid, x->props.family);
1143         hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1144
1145         h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
1146         hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1147
1148         if (x->id.spi) {
1149                 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
1150                                   x->props.family);
1151
1152                 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1153         }
1154
1155         tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
1156         if (x->replay_maxage)
1157                 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
1158
1159         net->xfrm.state_num++;
1160
1161         xfrm_hash_grow_check(net, x->bydst.next != NULL);
1162 }
1163
1164 /* net->xfrm.xfrm_state_lock is held */
1165 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
1166 {
1167         struct net *net = xs_net(xnew);
1168         unsigned short family = xnew->props.family;
1169         u32 reqid = xnew->props.reqid;
1170         struct xfrm_state *x;
1171         unsigned int h;
1172         u32 mark = xnew->mark.v & xnew->mark.m;
1173         u32 if_id = xnew->if_id;
1174
1175         h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
1176         hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1177                 if (x->props.family     == family &&
1178                     x->props.reqid      == reqid &&
1179                     x->if_id            == if_id &&
1180                     (mark & x->mark.m) == x->mark.v &&
1181                     xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
1182                     xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
1183                         x->genid++;
1184         }
1185 }
1186
1187 void xfrm_state_insert(struct xfrm_state *x)
1188 {
1189         struct net *net = xs_net(x);
1190
1191         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1192         __xfrm_state_bump_genids(x);
1193         __xfrm_state_insert(x);
1194         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1195 }
1196 EXPORT_SYMBOL(xfrm_state_insert);
1197
1198 /* net->xfrm.xfrm_state_lock is held */
1199 static struct xfrm_state *__find_acq_core(struct net *net,
1200                                           const struct xfrm_mark *m,
1201                                           unsigned short family, u8 mode,
1202                                           u32 reqid, u32 if_id, u8 proto,
1203                                           const xfrm_address_t *daddr,
1204                                           const xfrm_address_t *saddr,
1205                                           int create)
1206 {
1207         unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1208         struct xfrm_state *x;
1209         u32 mark = m->v & m->m;
1210
1211         hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1212                 if (x->props.reqid  != reqid ||
1213                     x->props.mode   != mode ||
1214                     x->props.family != family ||
1215                     x->km.state     != XFRM_STATE_ACQ ||
1216                     x->id.spi       != 0 ||
1217                     x->id.proto     != proto ||
1218                     (mark & x->mark.m) != x->mark.v ||
1219                     !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1220                     !xfrm_addr_equal(&x->props.saddr, saddr, family))
1221                         continue;
1222
1223                 xfrm_state_hold(x);
1224                 return x;
1225         }
1226
1227         if (!create)
1228                 return NULL;
1229
1230         x = xfrm_state_alloc(net);
1231         if (likely(x)) {
1232                 switch (family) {
1233                 case AF_INET:
1234                         x->sel.daddr.a4 = daddr->a4;
1235                         x->sel.saddr.a4 = saddr->a4;
1236                         x->sel.prefixlen_d = 32;
1237                         x->sel.prefixlen_s = 32;
1238                         x->props.saddr.a4 = saddr->a4;
1239                         x->id.daddr.a4 = daddr->a4;
1240                         break;
1241
1242                 case AF_INET6:
1243                         x->sel.daddr.in6 = daddr->in6;
1244                         x->sel.saddr.in6 = saddr->in6;
1245                         x->sel.prefixlen_d = 128;
1246                         x->sel.prefixlen_s = 128;
1247                         x->props.saddr.in6 = saddr->in6;
1248                         x->id.daddr.in6 = daddr->in6;
1249                         break;
1250                 }
1251
1252                 x->km.state = XFRM_STATE_ACQ;
1253                 x->id.proto = proto;
1254                 x->props.family = family;
1255                 x->props.mode = mode;
1256                 x->props.reqid = reqid;
1257                 x->if_id = if_id;
1258                 x->mark.v = m->v;
1259                 x->mark.m = m->m;
1260                 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1261                 xfrm_state_hold(x);
1262                 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
1263                 list_add(&x->km.all, &net->xfrm.state_all);
1264                 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1265                 h = xfrm_src_hash(net, daddr, saddr, family);
1266                 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1267
1268                 net->xfrm.state_num++;
1269
1270                 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1271         }
1272
1273         return x;
1274 }
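     /* An ACQ placeholder created here has no SPI yet and ages out after
      * net->xfrm.sysctl_acq_expires seconds unless a real SA replaces it.
      */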
1275
1276 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1277
1278 int xfrm_state_add(struct xfrm_state *x)
1279 {
1280         struct net *net = xs_net(x);
1281         struct xfrm_state *x1, *to_put;
1282         int family;
1283         int err;
1284         u32 mark = x->mark.v & x->mark.m;
1285         int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1286
1287         family = x->props.family;
1288
1289         to_put = NULL;
1290
1291         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1292
1293         x1 = __xfrm_state_locate(x, use_spi, family);
1294         if (x1) {
1295                 to_put = x1;
1296                 x1 = NULL;
1297                 err = -EEXIST;
1298                 goto out;
1299         }
1300
1301         if (use_spi && x->km.seq) {
1302                 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
1303                 if (x1 && ((x1->id.proto != x->id.proto) ||
1304                     !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
1305                         to_put = x1;
1306                         x1 = NULL;
1307                 }
1308         }
1309
1310         if (use_spi && !x1)
1311                 x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1312                                      x->props.reqid, x->if_id, x->id.proto,
1313                                      &x->id.daddr, &x->props.saddr, 0);
1314
1315         __xfrm_state_bump_genids(x);
1316         __xfrm_state_insert(x);
1317         err = 0;
1318
1319 out:
1320         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1321
1322         if (x1) {
1323                 xfrm_state_delete(x1);
1324                 xfrm_state_put(x1);
1325         }
1326
1327         if (to_put)
1328                 xfrm_state_put(to_put);
1329
1330         return err;
1331 }
1332 EXPORT_SYMBOL(xfrm_state_add);
1333
1334 #ifdef CONFIG_XFRM_MIGRATE
1335 static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
1336                                            struct xfrm_encap_tmpl *encap)
1337 {
1338         struct net *net = xs_net(orig);
1339         struct xfrm_state *x = xfrm_state_alloc(net);
1340         if (!x)
1341                 goto out;
1342
1343         memcpy(&x->id, &orig->id, sizeof(x->id));
1344         memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1345         memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1346         x->props.mode = orig->props.mode;
1347         x->props.replay_window = orig->props.replay_window;
1348         x->props.reqid = orig->props.reqid;
1349         x->props.family = orig->props.family;
1350         x->props.saddr = orig->props.saddr;
1351
1352         if (orig->aalg) {
1353                 x->aalg = xfrm_algo_auth_clone(orig->aalg);
1354                 if (!x->aalg)
1355                         goto error;
1356         }
1357         x->props.aalgo = orig->props.aalgo;
1358
1359         if (orig->aead) {
1360                 x->aead = xfrm_algo_aead_clone(orig->aead);
1361                 x->geniv = orig->geniv;
1362                 if (!x->aead)
1363                         goto error;
1364         }
1365         if (orig->ealg) {
1366                 x->ealg = xfrm_algo_clone(orig->ealg);
1367                 if (!x->ealg)
1368                         goto error;
1369         }
1370         x->props.ealgo = orig->props.ealgo;
1371
1372         if (orig->calg) {
1373                 x->calg = xfrm_algo_clone(orig->calg);
1374                 if (!x->calg)
1375                         goto error;
1376         }
1377         x->props.calgo = orig->props.calgo;
1378
1379         if (encap || orig->encap) {
1380                 if (encap)
1381                         x->encap = kmemdup(encap, sizeof(*x->encap),
1382                                         GFP_KERNEL);
1383                 else
1384                         x->encap = kmemdup(orig->encap, sizeof(*x->encap),
1385                                         GFP_KERNEL);
1386
1387                 if (!x->encap)
1388                         goto error;
1389         }
1390
1391         if (orig->coaddr) {
1392                 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1393                                     GFP_KERNEL);
1394                 if (!x->coaddr)
1395                         goto error;
1396         }
1397
1398         if (orig->replay_esn) {
1399                 if (xfrm_replay_clone(x, orig))
1400                         goto error;
1401         }
1402
1403         memcpy(&x->mark, &orig->mark, sizeof(x->mark));
1404
1405         if (xfrm_init_state(x) < 0)
1406                 goto error;
1407
1408         x->props.flags = orig->props.flags;
1409         x->props.extra_flags = orig->props.extra_flags;
1410
1411         x->if_id = orig->if_id;
1412         x->tfcpad = orig->tfcpad;
1413         x->replay_maxdiff = orig->replay_maxdiff;
1414         x->replay_maxage = orig->replay_maxage;
1415         x->curlft.add_time = orig->curlft.add_time;
1416         x->km.state = orig->km.state;
1417         x->km.seq = orig->km.seq;
1418         x->replay = orig->replay;
1419         x->preplay = orig->preplay;
1420
1421         return x;
1422
1423  error:
1424         xfrm_state_put(x);
1425 out:
1426         return NULL;
1427 }
1428
1429 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
1430 {
1431         unsigned int h;
1432         struct xfrm_state *x = NULL;
1433
1434         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1435
1436         if (m->reqid) {
1437                 h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
1438                                   m->reqid, m->old_family);
1439                 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1440                         if (x->props.mode != m->mode ||
1441                             x->id.proto != m->proto)
1442                                 continue;
1443                         if (m->reqid && x->props.reqid != m->reqid)
1444                                 continue;
1445                         if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1446                                              m->old_family) ||
1447                             !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1448                                              m->old_family))
1449                                 continue;
1450                         xfrm_state_hold(x);
1451                         break;
1452                 }
1453         } else {
1454                 h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
1455                                   m->old_family);
1456                 hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
1457                         if (x->props.mode != m->mode ||
1458                             x->id.proto != m->proto)
1459                                 continue;
1460                         if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1461                                              m->old_family) ||
1462                             !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1463                                              m->old_family))
1464                                 continue;
1465                         xfrm_state_hold(x);
1466                         break;
1467                 }
1468         }
1469
1470         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1471
1472         return x;
1473 }
1474 EXPORT_SYMBOL(xfrm_migrate_state_find);
1475
1476 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1477                                       struct xfrm_migrate *m,
1478                                       struct xfrm_encap_tmpl *encap)
1479 {
1480         struct xfrm_state *xc;
1481
1482         xc = xfrm_state_clone(x, encap);
1483         if (!xc)
1484                 return NULL;
1485
1486         memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1487         memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1488
1489         /* add state */
1490         if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
1491                 /* care is needed when the destination address of the
1492                    state is updated, as it is part of the lookup triplet */
1493                 xfrm_state_insert(xc);
1494         } else {
1495                 if (xfrm_state_add(xc) < 0)
1496                         goto error;
1497         }
1498
1499         return xc;
1500 error:
1501         xfrm_state_put(xc);
1502         return NULL;
1503 }
1504 EXPORT_SYMBOL(xfrm_state_migrate);
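/*
 * Illustrative flow (a sketch, not taken from this file): a migration
 * caller would typically pair the two helpers above, looking up the old
 * SA first and then cloning it onto the new addresses:
 *
 *	struct xfrm_state *x, *xc;
 *
 *	x = xfrm_migrate_state_find(m, net);
 *	if (!x)
 *		return -ENOENT;
 *	xc = xfrm_state_migrate(x, m, encap);
 *	if (!xc) {
 *		xfrm_state_put(x);
 *		return -ENOENT;
 *	}
 *	... retire the old state, then xfrm_state_put(x) ...
 */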
1505 #endif
1506
1507 int xfrm_state_update(struct xfrm_state *x)
1508 {
1509         struct xfrm_state *x1, *to_put;
1510         int err;
1511         int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1512         struct net *net = xs_net(x);
1513
1514         to_put = NULL;
1515
1516         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1517         x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1518
1519         err = -ESRCH;
1520         if (!x1)
1521                 goto out;
1522
1523         if (xfrm_state_kern(x1)) {
1524                 to_put = x1;
1525                 err = -EEXIST;
1526                 goto out;
1527         }
1528
1529         if (x1->km.state == XFRM_STATE_ACQ) {
1530                 __xfrm_state_insert(x);
1531                 x = NULL;
1532         }
1533         err = 0;
1534
1535 out:
1536         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1537
1538         if (to_put)
1539                 xfrm_state_put(to_put);
1540
1541         if (err)
1542                 return err;
1543
1544         if (!x) {
1545                 xfrm_state_delete(x1);
1546                 xfrm_state_put(x1);
1547                 return 0;
1548         }
1549
1550         err = -EINVAL;
1551         spin_lock_bh(&x1->lock);
1552         if (likely(x1->km.state == XFRM_STATE_VALID)) {
1553                 if (x->encap && x1->encap &&
1554                     x->encap->encap_type == x1->encap->encap_type)
1555                         memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1556                 else if (x->encap || x1->encap)
1557                         goto fail;
1558
1559                 if (x->coaddr && x1->coaddr) {
1560                         memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1561                 }
1562                 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1563                         memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1564                 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1565                 x1->km.dying = 0;
1566
1567                 tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
1568                 if (x1->curlft.use_time)
1569                         xfrm_state_check_expire(x1);
1570
1571                 if (x->props.smark.m || x->props.smark.v || x->if_id) {
1572                         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1573
1574                         if (x->props.smark.m || x->props.smark.v)
1575                                 x1->props.smark = x->props.smark;
1576
1577                         if (x->if_id)
1578                                 x1->if_id = x->if_id;
1579
1580                         __xfrm_state_bump_genids(x1);
1581                         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1582                 }
1583
1584                 err = 0;
1585                 x->km.state = XFRM_STATE_DEAD;
1586                 __xfrm_state_put(x);
1587         }
1588
1589 fail:
1590         spin_unlock_bh(&x1->lock);
1591
1592         xfrm_state_put(x1);
1593
1594         return err;
1595 }
1596 EXPORT_SYMBOL(xfrm_state_update);
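/*
 * Summary (annotation): xfrm_state_update() above locates the SA that
 * matches the caller-supplied template 'x'.  If the match is still an
 * XFRM_STATE_ACQ placeholder, 'x' simply takes its place; otherwise the
 * existing entry is refreshed in place (encap, coaddr, selector,
 * lifetimes, smark/if_id) and 'x' itself is marked XFRM_STATE_DEAD.
 */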
1597
1598 int xfrm_state_check_expire(struct xfrm_state *x)
1599 {
1600         if (!x->curlft.use_time)
1601                 x->curlft.use_time = ktime_get_real_seconds();
1602
1603         if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1604             x->curlft.packets >= x->lft.hard_packet_limit) {
1605                 x->km.state = XFRM_STATE_EXPIRED;
1606                 tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
1607                 return -EINVAL;
1608         }
1609
1610         if (!x->km.dying &&
1611             (x->curlft.bytes >= x->lft.soft_byte_limit ||
1612              x->curlft.packets >= x->lft.soft_packet_limit)) {
1613                 x->km.dying = 1;
1614                 km_state_expired(x, 0, 0);
1615         }
1616         return 0;
1617 }
1618 EXPORT_SYMBOL(xfrm_state_check_expire);
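/*
 * Example (illustrative values only): with lifetimes such as
 *
 *	x->lft.soft_byte_limit = 90 * 1024 * 1024;
 *	x->lft.hard_byte_limit = 100 * 1024 * 1024;
 *
 * crossing the soft limit marks the SA dying and fires a soft
 * km_state_expired() event once, while crossing the hard limit moves
 * the SA to XFRM_STATE_EXPIRED and returns -EINVAL to the caller.
 */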
1619
1620 struct xfrm_state *
1621 xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
1622                   u8 proto, unsigned short family)
1623 {
1624         struct xfrm_state *x;
1625
1626         rcu_read_lock();
1627         x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
1628         rcu_read_unlock();
1629         return x;
1630 }
1631 EXPORT_SYMBOL(xfrm_state_lookup);
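/*
 * Usage sketch (not part of the original file): a lookup by SPI returns
 * a referenced SA, or NULL, so callers must balance it with
 * xfrm_state_put():
 *
 *	struct xfrm_state *x;
 *
 *	x = xfrm_state_lookup(net, mark, &daddr, spi, IPPROTO_ESP, AF_INET);
 *	if (x) {
 *		... inspect or use x ...
 *		xfrm_state_put(x);
 *	}
 */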
1632
1633 struct xfrm_state *
1634 xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1635                          const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1636                          u8 proto, unsigned short family)
1637 {
1638         struct xfrm_state *x;
1639
1640         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1641         x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
1642         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1643         return x;
1644 }
1645 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1646
1647 struct xfrm_state *
1648 xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
1649               u32 if_id, u8 proto, const xfrm_address_t *daddr,
1650               const xfrm_address_t *saddr, int create, unsigned short family)
1651 {
1652         struct xfrm_state *x;
1653
1654         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1655         x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create);
1656         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1657
1658         return x;
1659 }
1660 EXPORT_SYMBOL(xfrm_find_acq);
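/*
 * Note (annotation): xfrm_find_acq() above simply wraps __find_acq_core()
 * under the state lock; a non-zero 'create' argument presumably asks the
 * core helper to set up an XFRM_STATE_ACQ placeholder when no matching SA
 * exists, which is how key managers obtain a state to attach an ACQUIRE to.
 */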
1661
1662 #ifdef CONFIG_XFRM_SUB_POLICY
1663 int
1664 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1665                unsigned short family, struct net *net)
1666 {
1667         int i;
1668         int err = 0;
1669         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1670         if (!afinfo)
1671                 return -EAFNOSUPPORT;
1672
1673         spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
1674         if (afinfo->tmpl_sort)
1675                 err = afinfo->tmpl_sort(dst, src, n);
1676         else
1677                 for (i = 0; i < n; i++)
1678                         dst[i] = src[i];
1679         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1680         rcu_read_unlock();
1681         return err;
1682 }
1683 EXPORT_SYMBOL(xfrm_tmpl_sort);
1684
1685 int
1686 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1687                 unsigned short family)
1688 {
1689         int i;
1690         int err = 0;
1691         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1692         struct net *net = xs_net(*src);
1693
1694         if (!afinfo)
1695                 return -EAFNOSUPPORT;
1696
1697         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1698         if (afinfo->state_sort)
1699                 err = afinfo->state_sort(dst, src, n);
1700         else
1701                 for (i = 0; i < n; i++)
1702                         dst[i] = src[i];
1703         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1704         rcu_read_unlock();
1705         return err;
1706 }
1707 EXPORT_SYMBOL(xfrm_state_sort);
1708 #endif
1709
1710 /* Crude, but no resolution list is kept: just scan the whole bydst table. */
1711
1712 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1713 {
1714         int i;
1715
1716         for (i = 0; i <= net->xfrm.state_hmask; i++) {
1717                 struct xfrm_state *x;
1718
1719                 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
1720                         if (x->km.seq == seq &&
1721                             (mark & x->mark.m) == x->mark.v &&
1722                             x->km.state == XFRM_STATE_ACQ) {
1723                                 xfrm_state_hold(x);
1724                                 return x;
1725                         }
1726                 }
1727         }
1728         return NULL;
1729 }
1730
1731 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1732 {
1733         struct xfrm_state *x;
1734
1735         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1736         x = __xfrm_find_acq_byseq(net, mark, seq);
1737         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1738         return x;
1739 }
1740 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1741
1742 u32 xfrm_get_acqseq(void)
1743 {
1744         u32 res;
1745         static atomic_t acqseq;
1746
1747         do {
1748                 res = atomic_inc_return(&acqseq);
1749         } while (!res);
1750
1751         return res;
1752 }
1753 EXPORT_SYMBOL(xfrm_get_acqseq);
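/*
 * Note (annotation): the do/while above deliberately skips the value 0,
 * so xfrm_get_acqseq() never hands out a zero acquire sequence number,
 * even when the 32-bit counter wraps.
 */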
1754
1755 int verify_spi_info(u8 proto, u32 min, u32 max)
1756 {
1757         switch (proto) {
1758         case IPPROTO_AH:
1759         case IPPROTO_ESP:
1760                 break;
1761
1762         case IPPROTO_COMP:
1763                 /* The IPCOMP SPI (a CPI) is only 16 bits. */
1764                 if (max >= 0x10000)
1765                         return -EINVAL;
1766                 break;
1767
1768         default:
1769                 return -EINVAL;
1770         }
1771
1772         if (min > max)
1773                 return -EINVAL;
1774
1775         return 0;
1776 }
1777 EXPORT_SYMBOL(verify_spi_info);
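/*
 * Worked examples (illustrative):
 *
 *	verify_spi_info(IPPROTO_ESP,  0x100,  0xffff)   returns 0
 *	verify_spi_info(IPPROTO_COMP, 0x100, 0x10000)   returns -EINVAL (CPI is 16 bits)
 *	verify_spi_info(IPPROTO_AH,  0x2000,  0x1000)   returns -EINVAL (min > max)
 */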
1778
1779 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1780 {
1781         struct net *net = xs_net(x);
1782         unsigned int h;
1783         struct xfrm_state *x0;
1784         int err = -ENOENT;
1785         __be32 minspi = htonl(low);
1786         __be32 maxspi = htonl(high);
1787         u32 mark = x->mark.v & x->mark.m;
1788
1789         spin_lock_bh(&x->lock);
1790         if (x->km.state == XFRM_STATE_DEAD)
1791                 goto unlock;
1792
1793         err = 0;
1794         if (x->id.spi)
1795                 goto unlock;
1796
1797         err = -ENOENT;
1798
1799         if (minspi == maxspi) {
1800                 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
1801                 if (x0) {
1802                         xfrm_state_put(x0);
1803                         goto unlock;
1804                 }
1805                 x->id.spi = minspi;
1806         } else {
1807                 u32 spi = 0;
1808                 for (h = 0; h < high-low+1; h++) {
1809                         spi = low + prandom_u32()%(high-low+1);
1810                         x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1811                         if (x0 == NULL) {
1812                                 x->id.spi = htonl(spi);
1813                                 break;
1814                         }
1815                         xfrm_state_put(x0);
1816                 }
1817         }
1818         if (x->id.spi) {
1819                 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1820                 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1821                 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1822                 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1823
1824                 err = 0;
1825         }
1826
1827 unlock:
1828         spin_unlock_bh(&x->lock);
1829
1830         return err;
1831 }
1832 EXPORT_SYMBOL(xfrm_alloc_spi);
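/*
 * Usage sketch (not from the original file): pick an unused SPI in a
 * caller-chosen range for a state that does not yet have one:
 *
 *	int err = xfrm_alloc_spi(x, 0x100, 0x1000);
 *
 *	if (err)
 *		... no free SPI in the range, or the state is dead ...
 *
 * On success x->id.spi is set (in network byte order) and the state is
 * linked into the byspi hash.
 */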
1833
1834 static bool __xfrm_state_filter_match(struct xfrm_state *x,
1835                                       struct xfrm_address_filter *filter)
1836 {
1837         if (filter) {
1838                 if ((filter->family == AF_INET ||
1839                      filter->family == AF_INET6) &&
1840                     x->props.family != filter->family)
1841                         return false;
1842
1843                 return addr_match(&x->props.saddr, &filter->saddr,
1844                                   filter->splen) &&
1845                        addr_match(&x->id.daddr, &filter->daddr,
1846                                   filter->dplen);
1847         }
1848         return true;
1849 }
1850
1851 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1852                     int (*func)(struct xfrm_state *, int, void*),
1853                     void *data)
1854 {
1855         struct xfrm_state *state;
1856         struct xfrm_state_walk *x;
1857         int err = 0;
1858
1859         if (walk->seq != 0 && list_empty(&walk->all))
1860                 return 0;
1861
1862         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1863         if (list_empty(&walk->all))
1864                 x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
1865         else
1866                 x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
1867         list_for_each_entry_from(x, &net->xfrm.state_all, all) {
1868                 if (x->state == XFRM_STATE_DEAD)
1869                         continue;
1870                 state = container_of(x, struct xfrm_state, km);
1871                 if (!xfrm_id_proto_match(state->id.proto, walk->proto))
1872                         continue;
1873                 if (!__xfrm_state_filter_match(state, walk->filter))
1874                         continue;
1875                 err = func(state, walk->seq, data);
1876                 if (err) {
1877                         list_move_tail(&walk->all, &x->all);
1878                         goto out;
1879                 }
1880                 walk->seq++;
1881         }
1882         if (walk->seq == 0) {
1883                 err = -ENOENT;
1884                 goto out;
1885         }
1886         list_del_init(&walk->all);
1887 out:
1888         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1889         return err;
1890 }
1891 EXPORT_SYMBOL(xfrm_state_walk);
1892
1893 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1894                           struct xfrm_address_filter *filter)
1895 {
1896         INIT_LIST_HEAD(&walk->all);
1897         walk->proto = proto;
1898         walk->state = XFRM_STATE_DEAD;
1899         walk->seq = 0;
1900         walk->filter = filter;
1901 }
1902 EXPORT_SYMBOL(xfrm_state_walk_init);
1903
1904 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
1905 {
1906         kfree(walk->filter);
1907
1908         if (list_empty(&walk->all))
1909                 return;
1910
1911         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1912         list_del(&walk->all);
1913         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1914 }
1915 EXPORT_SYMBOL(xfrm_state_walk_done);
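/*
 * Usage sketch (illustrative, not part of the original source): the three
 * walker helpers above are meant to be used together, typically from a
 * netlink dump, so the walk can be resumed across calls:
 *
 *	static int dump_one(struct xfrm_state *x, int seq, void *data)
 *	{
 *		... emit one SA; return 0 to continue, nonzero to stop ...
 *		return 0;
 *	}
 *
 *	struct xfrm_state_walk walk;
 *
 *	xfrm_state_walk_init(&walk, IPSEC_PROTO_ANY, NULL);
 *	err = xfrm_state_walk(net, &walk, dump_one, NULL);
 *	...
 *	xfrm_state_walk_done(&walk, net);
 *
 * A non-zero return from the callback leaves the walk positioned so the
 * next xfrm_state_walk() call continues after the last visited entry.
 */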
1916
1917 static void xfrm_replay_timer_handler(struct timer_list *t)
1918 {
1919         struct xfrm_state *x = from_timer(x, t, rtimer);
1920
1921         spin_lock(&x->lock);
1922
1923         if (x->km.state == XFRM_STATE_VALID) {
1924                 if (xfrm_aevent_is_on(xs_net(x)))
1925                         x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
1926                 else
1927                         x->xflags |= XFRM_TIME_DEFER;
1928         }
1929
1930         spin_unlock(&x->lock);
1931 }
1932
1933 static LIST_HEAD(xfrm_km_list);
1934
1935 void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
1936 {
1937         struct xfrm_mgr *km;
1938
1939         rcu_read_lock();
1940         list_for_each_entry_rcu(km, &xfrm_km_list, list)
1941                 if (km->notify_policy)
1942                         km->notify_policy(xp, dir, c);
1943         rcu_read_unlock();
1944 }
1945
1946 void km_state_notify(struct xfrm_state *x, const struct km_event *c)
1947 {
1948         struct xfrm_mgr *km;
1949         rcu_read_lock();
1950         list_for_each_entry_rcu(km, &xfrm_km_list, list)
1951                 if (km->notify)
1952                         km->notify(x, c);
1953         rcu_read_unlock();
1954 }
1955
1956 EXPORT_SYMBOL(km_policy_notify);
1957 EXPORT_SYMBOL(km_state_notify);
1958
1959 void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
1960 {
1961         struct km_event c;
1962
1963         c.data.hard = hard;
1964         c.portid = portid;
1965         c.event = XFRM_MSG_EXPIRE;
1966         km_state_notify(x, &c);
1967 }
1968
1969 EXPORT_SYMBOL(km_state_expired);
1970 /*
1971  * We send to all registered managers regardless of failures;
1972  * we are happy with one success.
1973  */
1974 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1975 {
1976         int err = -EINVAL, acqret;
1977         struct xfrm_mgr *km;
1978
1979         rcu_read_lock();
1980         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1981                 acqret = km->acquire(x, t, pol);
1982                 if (!acqret)
1983                         err = acqret;
1984         }
1985         rcu_read_unlock();
1986         return err;
1987 }
1988 EXPORT_SYMBOL(km_query);
1989
1990 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1991 {
1992         int err = -EINVAL;
1993         struct xfrm_mgr *km;
1994
1995         rcu_read_lock();
1996         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1997                 if (km->new_mapping)
1998                         err = km->new_mapping(x, ipaddr, sport);
1999                 if (!err)
2000                         break;
2001         }
2002         rcu_read_unlock();
2003         return err;
2004 }
2005 EXPORT_SYMBOL(km_new_mapping);
2006
2007 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
2008 {
2009         struct km_event c;
2010
2011         c.data.hard = hard;
2012         c.portid = portid;
2013         c.event = XFRM_MSG_POLEXPIRE;
2014         km_policy_notify(pol, dir, &c);
2015 }
2016 EXPORT_SYMBOL(km_policy_expired);
2017
2018 #ifdef CONFIG_XFRM_MIGRATE
2019 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2020                const struct xfrm_migrate *m, int num_migrate,
2021                const struct xfrm_kmaddress *k,
2022                const struct xfrm_encap_tmpl *encap)
2023 {
2024         int err = -EINVAL;
2025         int ret;
2026         struct xfrm_mgr *km;
2027
2028         rcu_read_lock();
2029         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2030                 if (km->migrate) {
2031                         ret = km->migrate(sel, dir, type, m, num_migrate, k,
2032                                           encap);
2033                         if (!ret)
2034                                 err = ret;
2035                 }
2036         }
2037         rcu_read_unlock();
2038         return err;
2039 }
2040 EXPORT_SYMBOL(km_migrate);
2041 #endif
2042
2043 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
2044 {
2045         int err = -EINVAL;
2046         int ret;
2047         struct xfrm_mgr *km;
2048
2049         rcu_read_lock();
2050         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2051                 if (km->report) {
2052                         ret = km->report(net, proto, sel, addr);
2053                         if (!ret)
2054                                 err = ret;
2055                 }
2056         }
2057         rcu_read_unlock();
2058         return err;
2059 }
2060 EXPORT_SYMBOL(km_report);
2061
2062 bool km_is_alive(const struct km_event *c)
2063 {
2064         struct xfrm_mgr *km;
2065         bool is_alive = false;
2066
2067         rcu_read_lock();
2068         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2069                 if (km->is_alive && km->is_alive(c)) {
2070                         is_alive = true;
2071                         break;
2072                 }
2073         }
2074         rcu_read_unlock();
2075
2076         return is_alive;
2077 }
2078 EXPORT_SYMBOL(km_is_alive);
2079
2080 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
2081 {
2082         int err;
2083         u8 *data;
2084         struct xfrm_mgr *km;
2085         struct xfrm_policy *pol = NULL;
2086
2087         if (in_compat_syscall())
2088                 return -EOPNOTSUPP;
2089
2090         if (!optval && !optlen) {
2091                 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
2092                 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
2093                 __sk_dst_reset(sk);
2094                 return 0;
2095         }
2096
2097         if (optlen <= 0 || optlen > PAGE_SIZE)
2098                 return -EMSGSIZE;
2099
2100         data = memdup_user(optval, optlen);
2101         if (IS_ERR(data))
2102                 return PTR_ERR(data);
2103
2104         err = -EINVAL;
2105         rcu_read_lock();
2106         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2107                 pol = km->compile_policy(sk, optname, data,
2108                                          optlen, &err);
2109                 if (err >= 0)
2110                         break;
2111         }
2112         rcu_read_unlock();
2113
2114         if (err >= 0) {
2115                 xfrm_sk_policy_insert(sk, err, pol);
2116                 xfrm_pol_put(pol);
2117                 __sk_dst_reset(sk);
2118                 err = 0;
2119         }
2120
2121         kfree(data);
2122         return err;
2123 }
2124 EXPORT_SYMBOL(xfrm_user_policy);
2125
2126 static DEFINE_SPINLOCK(xfrm_km_lock);
2127
2128 int xfrm_register_km(struct xfrm_mgr *km)
2129 {
2130         spin_lock_bh(&xfrm_km_lock);
2131         list_add_tail_rcu(&km->list, &xfrm_km_list);
2132         spin_unlock_bh(&xfrm_km_lock);
2133         return 0;
2134 }
2135 EXPORT_SYMBOL(xfrm_register_km);
2136
2137 int xfrm_unregister_km(struct xfrm_mgr *km)
2138 {
2139         spin_lock_bh(&xfrm_km_lock);
2140         list_del_rcu(&km->list);
2141         spin_unlock_bh(&xfrm_km_lock);
2142         synchronize_rcu();
2143         return 0;
2144 }
2145 EXPORT_SYMBOL(xfrm_unregister_km);
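/*
 * Registration sketch (hypothetical manager, for illustration only): a key
 * manager such as af_key or xfrm_user fills in a struct xfrm_mgr with the
 * callbacks it implements and registers it once at init time:
 *
 *	static struct xfrm_mgr example_mgr = {
 *		.notify		= example_notify,
 *		.acquire	= example_acquire,
 *		.compile_policy	= example_compile_policy,
 *		.new_mapping	= example_new_mapping,
 *		.notify_policy	= example_policy_notify,
 *	};
 *
 *	xfrm_register_km(&example_mgr);    (module init)
 *	xfrm_unregister_km(&example_mgr);  (module exit)
 *
 * Unregistration synchronizes RCU, so none of the manager's callbacks are
 * still running once xfrm_unregister_km() returns.
 */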
2146
2147 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
2148 {
2149         int err = 0;
2150
2151         if (WARN_ON(afinfo->family >= NPROTO))
2152                 return -EAFNOSUPPORT;
2153
2154         spin_lock_bh(&xfrm_state_afinfo_lock);
2155         if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
2156                 err = -EEXIST;
2157         else
2158                 rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
2159         spin_unlock_bh(&xfrm_state_afinfo_lock);
2160         return err;
2161 }
2162 EXPORT_SYMBOL(xfrm_state_register_afinfo);
2163
2164 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
2165 {
2166         int err = 0, family = afinfo->family;
2167
2168         if (WARN_ON(family >= NPROTO))
2169                 return -EAFNOSUPPORT;
2170
2171         spin_lock_bh(&xfrm_state_afinfo_lock);
2172         if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
2173                 if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
2174                         err = -EINVAL;
2175                 else
2176                         RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
2177         }
2178         spin_unlock_bh(&xfrm_state_afinfo_lock);
2179         synchronize_rcu();
2180         return err;
2181 }
2182 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
2183
2184 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
2185 {
2186         if (unlikely(family >= NPROTO))
2187                 return NULL;
2188
2189         return rcu_dereference(xfrm_state_afinfo[family]);
2190 }
2191
2192 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
2193 {
2194         struct xfrm_state_afinfo *afinfo;
2195         if (unlikely(family >= NPROTO))
2196                 return NULL;
2197         rcu_read_lock();
2198         afinfo = rcu_dereference(xfrm_state_afinfo[family]);
2199         if (unlikely(!afinfo))
2200                 rcu_read_unlock();
2201         return afinfo;
2202 }
2203
2204 void xfrm_flush_gc(void)
2205 {
2206         flush_work(&xfrm_state_gc_work);
2207 }
2208 EXPORT_SYMBOL(xfrm_flush_gc);
2209
2210 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
2211 void xfrm_state_delete_tunnel(struct xfrm_state *x)
2212 {
2213         if (x->tunnel) {
2214                 struct xfrm_state *t = x->tunnel;
2215
2216                 if (atomic_read(&t->tunnel_users) == 2)
2217                         xfrm_state_delete(t);
2218                 atomic_dec(&t->tunnel_users);
2219                 xfrm_state_put_sync(t);
2220                 x->tunnel = NULL;
2221         }
2222 }
2223 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
2224
2225 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
2226 {
2227         const struct xfrm_type *type = READ_ONCE(x->type);
2228
2229         if (x->km.state == XFRM_STATE_VALID &&
2230             type && type->get_mtu)
2231                 return type->get_mtu(x, mtu);
2232
2233         return mtu - x->props.header_len;
2234 }
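/*
 * Example (illustrative numbers): without a type-specific get_mtu() helper
 * the effective payload MTU is simply the link MTU minus the per-packet
 * header overhead recorded at init time, e.g. a 1500 byte link MTU and a
 * props.header_len of 56 would yield 1444 bytes.
 */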
2235
2236 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
2237 {
2238         struct xfrm_state_afinfo *afinfo;
2239         struct xfrm_mode *inner_mode;
2240         int family = x->props.family;
2241         int err;
2242
2243         err = -EAFNOSUPPORT;
2244         afinfo = xfrm_state_get_afinfo(family);
2245         if (!afinfo)
2246                 goto error;
2247
2248         err = 0;
2249         if (afinfo->init_flags)
2250                 err = afinfo->init_flags(x);
2251
2252         rcu_read_unlock();
2253
2254         if (err)
2255                 goto error;
2256
2257         err = -EPROTONOSUPPORT;
2258
2259         if (x->sel.family != AF_UNSPEC) {
2260                 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2261                 if (inner_mode == NULL)
2262                         goto error;
2263
2264                 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2265                     family != x->sel.family) {
2266                         xfrm_put_mode(inner_mode);
2267                         goto error;
2268                 }
2269
2270                 x->inner_mode = inner_mode;
2271         } else {
2272                 struct xfrm_mode *inner_mode_iaf;
2273                 int iafamily = AF_INET;
2274
2275                 inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
2276                 if (inner_mode == NULL)
2277                         goto error;
2278
2279                 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
2280                         xfrm_put_mode(inner_mode);
2281                         goto error;
2282                 }
2283                 x->inner_mode = inner_mode;
2284
2285                 if (x->props.family == AF_INET)
2286                         iafamily = AF_INET6;
2287
2288                 inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
2289                 if (inner_mode_iaf) {
2290                         if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
2291                                 x->inner_mode_iaf = inner_mode_iaf;
2292                         else
2293                                 xfrm_put_mode(inner_mode_iaf);
2294                 }
2295         }
2296
2297         x->type = xfrm_get_type(x->id.proto, family);
2298         if (x->type == NULL)
2299                 goto error;
2300
2301         x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);
2302
2303         err = x->type->init_state(x);
2304         if (err)
2305                 goto error;
2306
2307         x->outer_mode = xfrm_get_mode(x->props.mode, family);
2308         if (x->outer_mode == NULL) {
2309                 err = -EPROTONOSUPPORT;
2310                 goto error;
2311         }
2312
2313         if (init_replay) {
2314                 err = xfrm_init_replay(x);
2315                 if (err)
2316                         goto error;
2317         }
2318
2319 error:
2320         return err;
2321 }
2322
2323 EXPORT_SYMBOL(__xfrm_init_state);
2324
2325 int xfrm_init_state(struct xfrm_state *x)
2326 {
2327         int err;
2328
2329         err = __xfrm_init_state(x, true, false);
2330         if (!err)
2331                 x->km.state = XFRM_STATE_VALID;
2332
2333         return err;
2334 }
2335
2336 EXPORT_SYMBOL(xfrm_init_state);
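/*
 * Note (annotation): xfrm_init_state() is the convenience wrapper used for
 * example by the clone path above; it runs the full __xfrm_init_state()
 * with replay initialization enabled and no offload, and only on success
 * flips the state to XFRM_STATE_VALID:
 *
 *	if (xfrm_init_state(x) < 0)
 *		goto error;
 */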
2337
2338 int __net_init xfrm_state_init(struct net *net)
2339 {
2340         unsigned int sz;
2341
2342         if (net_eq(net, &init_net))
2343                 xfrm_state_cache = KMEM_CACHE(xfrm_state,
2344                                               SLAB_HWCACHE_ALIGN | SLAB_PANIC);
2345
2346         INIT_LIST_HEAD(&net->xfrm.state_all);
2347
2348         sz = sizeof(struct hlist_head) * 8;
2349
2350         net->xfrm.state_bydst = xfrm_hash_alloc(sz);
2351         if (!net->xfrm.state_bydst)
2352                 goto out_bydst;
2353         net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
2354         if (!net->xfrm.state_bysrc)
2355                 goto out_bysrc;
2356         net->xfrm.state_byspi = xfrm_hash_alloc(sz);
2357         if (!net->xfrm.state_byspi)
2358                 goto out_byspi;
2359         net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2360
2361         net->xfrm.state_num = 0;
2362         INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
2363         spin_lock_init(&net->xfrm.xfrm_state_lock);
2364         return 0;
2365
2366 out_byspi:
2367         xfrm_hash_free(net->xfrm.state_bysrc, sz);
2368 out_bysrc:
2369         xfrm_hash_free(net->xfrm.state_bydst, sz);
2370 out_bydst:
2371         return -ENOMEM;
2372 }
2373
2374 void xfrm_state_fini(struct net *net)
2375 {
2376         unsigned int sz;
2377
2378         flush_work(&net->xfrm.state_hash_work);
2379         flush_work(&xfrm_state_gc_work);
2380         xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
2381
2382         WARN_ON(!list_empty(&net->xfrm.state_all));
2383
2384         sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
2385         WARN_ON(!hlist_empty(net->xfrm.state_byspi));
2386         xfrm_hash_free(net->xfrm.state_byspi, sz);
2387         WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
2388         xfrm_hash_free(net->xfrm.state_bysrc, sz);
2389         WARN_ON(!hlist_empty(net->xfrm.state_bydst));
2390         xfrm_hash_free(net->xfrm.state_bydst, sz);
2391 }
2392
2393 #ifdef CONFIG_AUDITSYSCALL
2394 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2395                                      struct audit_buffer *audit_buf)
2396 {
2397         struct xfrm_sec_ctx *ctx = x->security;
2398         u32 spi = ntohl(x->id.spi);
2399
2400         if (ctx)
2401                 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2402                                  ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2403
2404         switch (x->props.family) {
2405         case AF_INET:
2406                 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2407                                  &x->props.saddr.a4, &x->id.daddr.a4);
2408                 break;
2409         case AF_INET6:
2410                 audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
2411                                  x->props.saddr.a6, x->id.daddr.a6);
2412                 break;
2413         }
2414
2415         audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2416 }
2417
2418 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2419                                       struct audit_buffer *audit_buf)
2420 {
2421         const struct iphdr *iph4;
2422         const struct ipv6hdr *iph6;
2423
2424         switch (family) {
2425         case AF_INET:
2426                 iph4 = ip_hdr(skb);
2427                 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2428                                  &iph4->saddr, &iph4->daddr);
2429                 break;
2430         case AF_INET6:
2431                 iph6 = ipv6_hdr(skb);
2432                 audit_log_format(audit_buf,
2433                                  " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
2434                                  &iph6->saddr, &iph6->daddr,
2435                                  iph6->flow_lbl[0] & 0x0f,
2436                                  iph6->flow_lbl[1],
2437                                  iph6->flow_lbl[2]);
2438                 break;
2439         }
2440 }
2441
2442 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
2443 {
2444         struct audit_buffer *audit_buf;
2445
2446         audit_buf = xfrm_audit_start("SAD-add");
2447         if (audit_buf == NULL)
2448                 return;
2449         xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2450         xfrm_audit_helper_sainfo(x, audit_buf);
2451         audit_log_format(audit_buf, " res=%u", result);
2452         audit_log_end(audit_buf);
2453 }
2454 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2455
2456 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
2457 {
2458         struct audit_buffer *audit_buf;
2459
2460         audit_buf = xfrm_audit_start("SAD-delete");
2461         if (audit_buf == NULL)
2462                 return;
2463         xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2464         xfrm_audit_helper_sainfo(x, audit_buf);
2465         audit_log_format(audit_buf, " res=%u", result);
2466         audit_log_end(audit_buf);
2467 }
2468 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2469
2470 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2471                                       struct sk_buff *skb)
2472 {
2473         struct audit_buffer *audit_buf;
2474         u32 spi;
2475
2476         audit_buf = xfrm_audit_start("SA-replay-overflow");
2477         if (audit_buf == NULL)
2478                 return;
2479         xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2480         /* don't record the sequence number because it's inherent in this kind
2481          * of audit message */
2482         spi = ntohl(x->id.spi);
2483         audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2484         audit_log_end(audit_buf);
2485 }
2486 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2487
2488 void xfrm_audit_state_replay(struct xfrm_state *x,
2489                              struct sk_buff *skb, __be32 net_seq)
2490 {
2491         struct audit_buffer *audit_buf;
2492         u32 spi;
2493
2494         audit_buf = xfrm_audit_start("SA-replayed-pkt");
2495         if (audit_buf == NULL)
2496                 return;
2497         xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2498         spi = ntohl(x->id.spi);
2499         audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2500                          spi, spi, ntohl(net_seq));
2501         audit_log_end(audit_buf);
2502 }
2503 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
2504
2505 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2506 {
2507         struct audit_buffer *audit_buf;
2508
2509         audit_buf = xfrm_audit_start("SA-notfound");
2510         if (audit_buf == NULL)
2511                 return;
2512         xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2513         audit_log_end(audit_buf);
2514 }
2515 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2516
2517 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2518                                __be32 net_spi, __be32 net_seq)
2519 {
2520         struct audit_buffer *audit_buf;
2521         u32 spi;
2522
2523         audit_buf = xfrm_audit_start("SA-notfound");
2524         if (audit_buf == NULL)
2525                 return;
2526         xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2527         spi = ntohl(net_spi);
2528         audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2529                          spi, spi, ntohl(net_seq));
2530         audit_log_end(audit_buf);
2531 }
2532 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2533
2534 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2535                               struct sk_buff *skb, u8 proto)
2536 {
2537         struct audit_buffer *audit_buf;
2538         __be32 net_spi;
2539         __be32 net_seq;
2540
2541         audit_buf = xfrm_audit_start("SA-icv-failure");
2542         if (audit_buf == NULL)
2543                 return;
2544         xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2545         if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2546                 u32 spi = ntohl(net_spi);
2547                 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2548                                  spi, spi, ntohl(net_seq));
2549         }
2550         audit_log_end(audit_buf);
2551 }
2552 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2553 #endif /* CONFIG_AUDITSYSCALL */