6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
25 #include "xfrm_hash.h"
/* NOTE(review): this chunk is a truncated extraction — the original file's own
 * line numbers are fused onto each line and many lines are missing; comments
 * below describe only what the visible code shows. */
/* Module-wide state: sysctl knobs, the global SA hash tables, and the
 * counters that coordinate SA-list walks with garbage collection. */
28 EXPORT_SYMBOL(xfrm_nl);
/* Async-event timer/replay sysctls (defaults from XFRM_AE_* constants). */
30 u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
31 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
33 u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
34 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
/* Lifetime (seconds) of ACQUIRE-state placeholder SAs. */
36 u32 sysctl_xfrm_acq_expires __read_mostly = 30;
38 /* Each xfrm_state may be linked to two tables:
40 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
41 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
42 destination/tunnel endpoint. (output)
/* Protects all three hash tables, xfrm_state_all and the counters below. */
45 static DEFINE_SPINLOCK(xfrm_state_lock);
47 /* Hash table to find appropriate SA towards given target (endpoint
48 * of tunnel or destination of transport mode) allowed by selector.
50 * Main use is finding SA after policy selected tunnel or transport mode.
51 * Also, it can be used by ah/esp icmp error handler to find offending SA.
/* Flat list of every SA plus the three per-key hash tables and their mask. */
53 static LIST_HEAD(xfrm_state_all);
54 static struct hlist_head *xfrm_state_bydst __read_mostly;
55 static struct hlist_head *xfrm_state_bysrc __read_mostly;
56 static struct hlist_head *xfrm_state_byspi __read_mostly;
57 static unsigned int xfrm_state_hmask __read_mostly;
58 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
59 static unsigned int xfrm_state_num;
60 static unsigned int xfrm_state_genid;
62 /* Counter indicating ongoing walk, protected by xfrm_state_lock. */
63 static unsigned long xfrm_state_walk_ongoing;
64 /* Counter indicating walk completion, protected by xfrm_cfg_mutex. */
65 static unsigned long xfrm_state_walk_completed;
/* Per-family (AF_INET/AF_INET6) ops lookup, defined later in this file. */
67 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
68 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
/* Replay auditing is a no-op unless audit syscall support is configured. */
70 #ifdef CONFIG_AUDITSYSCALL
71 static void xfrm_audit_state_replay(struct xfrm_state *x,
72 struct sk_buff *skb, __be32 net_seq);
74 #define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
75 #endif /* CONFIG_AUDITSYSCALL */
/* Hash-bucket index helpers: thin wrappers applying the current table mask
 * (xfrm_state_hmask) to the shared __xfrm_*_hash primitives.
 * NOTE(review): some lines (e.g. the reqid parameter, braces) are missing
 * from this extraction. */
77 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
78 xfrm_address_t *saddr,
80 unsigned short family)
82 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
85 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
86 xfrm_address_t *saddr,
87 unsigned short family)
89 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
92 static inline unsigned int
93 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
95 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
/* Re-bucket every SA on one old-table chain into the three new tables,
 * rehashing by dst, src and (presumably, when an SPI is set) by SPI with the
 * new mask. Uses the _safe iterator because each node is moved off the chain. */
98 static void xfrm_hash_transfer(struct hlist_head *list,
99 struct hlist_head *ndsttable,
100 struct hlist_head *nsrctable,
101 struct hlist_head *nspitable,
102 unsigned int nhashmask)
104 struct hlist_node *entry, *tmp;
105 struct xfrm_state *x;
107 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
110 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
111 x->props.reqid, x->props.family,
113 hlist_add_head(&x->bydst, ndsttable+h);
115 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
118 hlist_add_head(&x->bysrc, nsrctable+h);
121 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
122 x->id.proto, x->props.family,
124 hlist_add_head(&x->byspi, nspitable+h);
/* New table size: double the current bucket count, in bytes. */
129 static unsigned long xfrm_hash_new_size(void)
131 return ((xfrm_state_hmask + 1) << 1) *
132 sizeof(struct hlist_head);
/* Serializes concurrent resize attempts. */
135 static DEFINE_MUTEX(hash_resize_mutex);
/* Workqueue handler that grows the SA hash tables: allocate three new
 * tables, transfer every chain under xfrm_state_lock, swap the pointers and
 * mask, then free the old tables. On partial allocation failure the
 * already-allocated tables are freed (error paths partially elided here). */
137 static void xfrm_hash_resize(struct work_struct *__unused)
139 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
140 unsigned long nsize, osize;
141 unsigned int nhashmask, ohashmask;
144 mutex_lock(&hash_resize_mutex);
146 nsize = xfrm_hash_new_size();
147 ndst = xfrm_hash_alloc(nsize);
150 nsrc = xfrm_hash_alloc(nsize);
152 xfrm_hash_free(ndst, nsize);
155 nspi = xfrm_hash_alloc(nsize);
157 xfrm_hash_free(ndst, nsize);
158 xfrm_hash_free(nsrc, nsize);
/* All three allocations succeeded — swap under the state lock. */
162 spin_lock_bh(&xfrm_state_lock);
164 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
165 for (i = xfrm_state_hmask; i >= 0; i--)
166 xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
169 odst = xfrm_state_bydst;
170 osrc = xfrm_state_bysrc;
171 ospi = xfrm_state_byspi;
172 ohashmask = xfrm_state_hmask;
174 xfrm_state_bydst = ndst;
175 xfrm_state_bysrc = nsrc;
176 xfrm_state_byspi = nspi;
177 xfrm_state_hmask = nhashmask;
179 spin_unlock_bh(&xfrm_state_lock);
181 osize = (ohashmask + 1) * sizeof(struct hlist_head);
182 xfrm_hash_free(odst, osize);
183 xfrm_hash_free(osrc, osize);
184 xfrm_hash_free(ospi, osize);
187 mutex_unlock(&hash_resize_mutex);
/* Scheduled from xfrm_hash_grow_check() when chains collide. */
190 static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
/* Key-manager wait queue, per-family afinfo registry, and the garbage
 * collection work/lists. gc_leftovers holds SAs that must outlive an
 * in-progress walk; gc_list receives freshly destroyed SAs. */
192 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
193 EXPORT_SYMBOL(km_waitq);
195 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
196 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
198 static struct work_struct xfrm_state_gc_work;
199 static LIST_HEAD(xfrm_state_gc_leftovers);
200 static LIST_HEAD(xfrm_state_gc_list);
201 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
/* Forward declarations for routines used before their definitions. */
203 int __xfrm_state_delete(struct xfrm_state *x);
205 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
206 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
/* Acquire the afinfo for a family with the registry write-lock held;
 * returns NULL (after dropping the lock) if the family is out of range or
 * unregistered. The caller must pair with xfrm_state_unlock_afinfo(). */
208 static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
210 struct xfrm_state_afinfo *afinfo;
211 if (unlikely(family >= NPROTO))
213 write_lock_bh(&xfrm_state_afinfo_lock);
214 afinfo = xfrm_state_afinfo[family];
215 if (unlikely(!afinfo))
216 write_unlock_bh(&xfrm_state_afinfo_lock);
220 static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
221 __releases(xfrm_state_afinfo_lock)
223 write_unlock_bh(&xfrm_state_afinfo_lock);
/* Register a transform type (AH/ESP/IPcomp/...) in the family's type map.
 * NOTE(review): the -EEXIST branch for an occupied slot is elided here. */
226 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
228 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
229 const struct xfrm_type **typemap;
232 if (unlikely(afinfo == NULL))
233 return -EAFNOSUPPORT;
234 typemap = afinfo->type_map;
236 if (likely(typemap[type->proto] == NULL))
237 typemap[type->proto] = type;
240 xfrm_state_unlock_afinfo(afinfo);
243 EXPORT_SYMBOL(xfrm_register_type);
/* Remove a transform type; only clears the slot if it still holds @type. */
245 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
247 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
248 const struct xfrm_type **typemap;
251 if (unlikely(afinfo == NULL))
252 return -EAFNOSUPPORT;
253 typemap = afinfo->type_map;
255 if (unlikely(typemap[type->proto] != type))
258 typemap[type->proto] = NULL;
259 xfrm_state_unlock_afinfo(afinfo);
262 EXPORT_SYMBOL(xfrm_unregister_type);
/* Look up a transform type by protocol, taking a module reference. If the
 * type is absent, try once to load the "xfrm-type-<family>-<proto>" module
 * and retry (the retry loop's label is elided in this extraction). */
264 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
266 struct xfrm_state_afinfo *afinfo;
267 const struct xfrm_type **typemap;
268 const struct xfrm_type *type;
269 int modload_attempted = 0;
272 afinfo = xfrm_state_get_afinfo(family);
273 if (unlikely(afinfo == NULL))
275 typemap = afinfo->type_map;
277 type = typemap[proto];
/* try_module_get failure means the owner is unloading — treat as absent. */
278 if (unlikely(type && !try_module_get(type->owner)))
280 if (!type && !modload_attempted) {
281 xfrm_state_put_afinfo(afinfo);
282 request_module("xfrm-type-%d-%d", family, proto);
283 modload_attempted = 1;
287 xfrm_state_put_afinfo(afinfo);
/* Drop the module reference taken by xfrm_get_type(). */
291 static void xfrm_put_type(const struct xfrm_type *type)
293 module_put(type->owner);
/* Register an encapsulation mode (transport/tunnel/BEET...) for a family.
 * Takes a reference on the afinfo owner module so the mode can point back
 * at it. Error-return lines (-EEXIST/-EAFNOSUPPORT paths) are partially
 * elided in this extraction. */
296 int xfrm_register_mode(struct xfrm_mode *mode, int family)
298 struct xfrm_state_afinfo *afinfo;
299 struct xfrm_mode **modemap;
302 if (unlikely(mode->encap >= XFRM_MODE_MAX))
305 afinfo = xfrm_state_lock_afinfo(family);
306 if (unlikely(afinfo == NULL))
307 return -EAFNOSUPPORT;
310 modemap = afinfo->mode_map;
311 if (modemap[mode->encap])
315 if (!try_module_get(afinfo->owner))
318 mode->afinfo = afinfo;
319 modemap[mode->encap] = mode;
323 xfrm_state_unlock_afinfo(afinfo);
326 EXPORT_SYMBOL(xfrm_register_mode);
/* Unregister a mode; releases the afinfo-owner reference taken above. */
328 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
330 struct xfrm_state_afinfo *afinfo;
331 struct xfrm_mode **modemap;
334 if (unlikely(mode->encap >= XFRM_MODE_MAX))
337 afinfo = xfrm_state_lock_afinfo(family);
338 if (unlikely(afinfo == NULL))
339 return -EAFNOSUPPORT;
342 modemap = afinfo->mode_map;
343 if (likely(modemap[mode->encap] == mode)) {
344 modemap[mode->encap] = NULL;
345 module_put(mode->afinfo->owner);
349 xfrm_state_unlock_afinfo(afinfo);
352 EXPORT_SYMBOL(xfrm_unregister_mode);
/* Look up a mode by encap id, pinning its module; on miss, attempt a
 * one-shot request_module("xfrm-mode-<family>-<encap>") and retry. */
354 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
356 struct xfrm_state_afinfo *afinfo;
357 struct xfrm_mode *mode;
358 int modload_attempted = 0;
360 if (unlikely(encap >= XFRM_MODE_MAX))
364 afinfo = xfrm_state_get_afinfo(family);
365 if (unlikely(afinfo == NULL))
368 mode = afinfo->mode_map[encap];
369 if (unlikely(mode && !try_module_get(mode->owner)))
371 if (!mode && !modload_attempted) {
372 xfrm_state_put_afinfo(afinfo);
373 request_module("xfrm-mode-%d-%d", family, encap);
374 modload_attempted = 1;
378 xfrm_state_put_afinfo(afinfo);
/* Drop the module reference taken by xfrm_get_mode(). */
382 static void xfrm_put_mode(struct xfrm_mode *mode)
384 module_put(mode->owner);
/* Final teardown of one SA: stop its timers, release modes/type/security
 * context. Freeing of algorithm blobs and the state itself is elided in
 * this extraction. Must not run while a walk may still reference the SA. */
387 static void xfrm_state_gc_destroy(struct xfrm_state *x)
389 del_timer_sync(&x->timer);
390 del_timer_sync(&x->rtimer);
397 xfrm_put_mode(x->inner_mode);
398 if (x->inner_mode_iaf)
399 xfrm_put_mode(x->inner_mode_iaf);
401 xfrm_put_mode(x->outer_mode);
403 x->type->destructor(x);
404 xfrm_put_type(x->type);
406 security_xfrm_state_free(x);
/* GC work: splice newly-dead SAs onto the leftovers list, then destroy
 * every leftover whose deleting walk generation (stored in x->lastused)
 * has completed; others stay queued for a later pass. */
410 static void xfrm_state_gc_task(struct work_struct *data)
412 struct xfrm_state *x, *tmp;
413 unsigned long completed;
415 mutex_lock(&xfrm_cfg_mutex);
416 spin_lock_bh(&xfrm_state_gc_lock);
417 list_splice_tail_init(&xfrm_state_gc_list, &xfrm_state_gc_leftovers);
418 spin_unlock_bh(&xfrm_state_gc_lock);
420 completed = xfrm_state_walk_completed;
421 mutex_unlock(&xfrm_cfg_mutex);
423 list_for_each_entry_safe(x, tmp, &xfrm_state_gc_leftovers, gclist) {
424 if ((long)(x->lastused - completed) > 0)
426 list_del(&x->gclist);
427 xfrm_state_gc_destroy(x);
/* Convert seconds to jiffies, clamped below MAX_SCHEDULE_TIMEOUT. */
433 static inline unsigned long make_jiffies(long secs)
435 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
436 return MAX_SCHEDULE_TIMEOUT-1;
/* Per-SA lifetime timer. Computes the nearest pending hard/soft add/use
 * expiry: hard expiries delete the SA (notifying the key manager), soft
 * expiries emit km_state_expired(x, 0, 0) warnings, and the timer is
 * re-armed for the next deadline. NOTE(review): the spin_lock(&x->lock),
 * several goto targets and warn/expire bookkeeping lines are elided. */
441 static void xfrm_timer_handler(unsigned long data)
443 struct xfrm_state *x = (struct xfrm_state*)data;
444 unsigned long now = get_seconds();
445 long next = LONG_MAX;
450 if (x->km.state == XFRM_STATE_DEAD)
452 if (x->km.state == XFRM_STATE_EXPIRED)
454 if (x->lft.hard_add_expires_seconds) {
455 long tmo = x->lft.hard_add_expires_seconds +
456 x->curlft.add_time - now;
462 if (x->lft.hard_use_expires_seconds) {
463 long tmo = x->lft.hard_use_expires_seconds +
464 (x->curlft.use_time ? : now) - now;
472 if (x->lft.soft_add_expires_seconds) {
473 long tmo = x->lft.soft_add_expires_seconds +
474 x->curlft.add_time - now;
480 if (x->lft.soft_use_expires_seconds) {
481 long tmo = x->lft.soft_use_expires_seconds +
482 (x->curlft.use_time ? : now) - now;
/* Soft expiry: warn the key manager, SA stays usable. */
491 km_state_expired(x, 0, 0);
493 if (next != LONG_MAX)
494 mod_timer(&x->timer, jiffies + make_jiffies(next));
/* Hard expiry of an un-resolved ACQUIRE (no SPI yet): mark expired. */
499 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
500 x->km.state = XFRM_STATE_EXPIRED;
506 err = __xfrm_state_delete(x);
507 if (!err && x->id.spi)
508 km_state_expired(x, 1, 0);
510 xfrm_audit_state_delete(x, err ? 0 : 1,
511 audit_get_loginuid(current),
512 audit_get_sessionid(current), 0);
515 spin_unlock(&x->lock);
518 static void xfrm_replay_timer_handler(unsigned long data);
/* Allocate and minimally initialize a new SA: refcount 1, empty list/hash
 * linkage, lifetime + replay timers armed-but-idle, infinite byte/packet
 * soft and hard limits. Returns NULL on allocation failure (the NULL check
 * line is elided in this extraction). */
520 struct xfrm_state *xfrm_state_alloc(void)
522 struct xfrm_state *x;
524 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
527 atomic_set(&x->refcnt, 1);
528 atomic_set(&x->tunnel_users, 0);
529 INIT_LIST_HEAD(&x->all);
530 INIT_HLIST_NODE(&x->bydst);
531 INIT_HLIST_NODE(&x->bysrc);
532 INIT_HLIST_NODE(&x->byspi);
533 setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
534 setup_timer(&x->rtimer, xfrm_replay_timer_handler,
536 x->curlft.add_time = get_seconds();
537 x->lft.soft_byte_limit = XFRM_INF;
538 x->lft.soft_packet_limit = XFRM_INF;
539 x->lft.hard_byte_limit = XFRM_INF;
540 x->lft.hard_packet_limit = XFRM_INF;
541 x->replay_maxage = 0;
542 x->replay_maxdiff = 0;
543 x->inner_mode = NULL;
544 x->inner_mode_iaf = NULL;
545 spin_lock_init(&x->lock);
549 EXPORT_SYMBOL(xfrm_state_alloc);
/* Last-reference drop: queue the (already DEAD) SA for the GC worker
 * rather than freeing inline, so teardown can sleep. */
551 void __xfrm_state_destroy(struct xfrm_state *x)
553 WARN_ON(x->km.state != XFRM_STATE_DEAD);
555 spin_lock_bh(&xfrm_state_gc_lock);
556 list_add_tail(&x->gclist, &xfrm_state_gc_list);
557 spin_unlock_bh(&xfrm_state_gc_lock);
558 schedule_work(&xfrm_state_gc_work);
560 EXPORT_SYMBOL(__xfrm_state_destroy);
/* Mark an SA dead and unlink it from the flat list and all hash tables.
 * Caller holds x->lock. Records the current walk generation in x->lastused
 * so GC defers destruction until concurrent walks finish. Drops the
 * original allocation reference (the put itself is elided here). */
562 int __xfrm_state_delete(struct xfrm_state *x)
566 if (x->km.state != XFRM_STATE_DEAD) {
567 x->km.state = XFRM_STATE_DEAD;
568 spin_lock(&xfrm_state_lock);
569 x->lastused = xfrm_state_walk_ongoing;
570 list_del_rcu(&x->all);
571 hlist_del(&x->bydst);
572 hlist_del(&x->bysrc);
574 hlist_del(&x->byspi);
576 spin_unlock(&xfrm_state_lock);
578 /* All xfrm_state objects are created by xfrm_state_alloc.
579 * The xfrm_state_alloc call gives a reference, and that
580 * is what we are dropping here.
588 EXPORT_SYMBOL(__xfrm_state_delete);
/* Locked wrapper around __xfrm_state_delete(). */
590 int xfrm_state_delete(struct xfrm_state *x)
594 spin_lock_bh(&x->lock);
595 err = __xfrm_state_delete(x);
596 spin_unlock_bh(&x->lock);
600 EXPORT_SYMBOL(xfrm_state_delete);
/* Pre-flight for flush when LSM support is built in: ask the security
 * module whether each matching SA may be deleted; audit and abort the
 * whole flush on the first refusal. A stub (always-allow, presumably
 * returning 0) is used otherwise — its body is elided in this extraction. */
602 #ifdef CONFIG_SECURITY_NETWORK_XFRM
604 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
608 for (i = 0; i <= xfrm_state_hmask; i++) {
609 struct hlist_node *entry;
610 struct xfrm_state *x;
612 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
613 if (xfrm_id_proto_match(x->id.proto, proto) &&
614 (err = security_xfrm_state_delete(x)) != 0) {
615 xfrm_audit_state_delete(x, 0,
616 audit_info->loginuid,
617 audit_info->sessionid,
628 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
/* Delete every non-kernel-internal SA matching @proto. Drops and retakes
 * xfrm_state_lock around each xfrm_state_delete() (which sleeps on audit);
 * the restart-scan logic after relocking is elided here. */
634 int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
638 spin_lock_bh(&xfrm_state_lock);
639 err = xfrm_state_flush_secctx_check(proto, audit_info);
643 for (i = 0; i <= xfrm_state_hmask; i++) {
644 struct hlist_node *entry;
645 struct xfrm_state *x;
647 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
648 if (!xfrm_state_kern(x) &&
649 xfrm_id_proto_match(x->id.proto, proto)) {
651 spin_unlock_bh(&xfrm_state_lock);
653 err = xfrm_state_delete(x);
654 xfrm_audit_state_delete(x, err ? 0 : 1,
655 audit_info->loginuid,
656 audit_info->sessionid,
660 spin_lock_bh(&xfrm_state_lock);
668 spin_unlock_bh(&xfrm_state_lock);
672 EXPORT_SYMBOL(xfrm_state_flush);
/* Snapshot SAD statistics (count, hash buckets, hash max) under the lock. */
674 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
676 spin_lock_bh(&xfrm_state_lock);
677 si->sadcnt = xfrm_state_num;
678 si->sadhcnt = xfrm_state_hmask;
679 si->sadhmcnt = xfrm_state_hashmax;
680 spin_unlock_bh(&xfrm_state_lock);
682 EXPORT_SYMBOL(xfrm_sad_getinfo);
/* Fill a temporary selector on an ACQUIRE SA via the family-specific hook. */
685 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
686 struct xfrm_tmpl *tmpl,
687 xfrm_address_t *daddr, xfrm_address_t *saddr,
688 unsigned short family)
690 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
693 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
694 xfrm_state_put_afinfo(afinfo);
/* Lookup by (daddr, SPI, proto) in the byspi table; per-family address
 * compare (IPv4 a4 vs IPv6 in6_addr). Hold/return lines are elided. */
698 static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
700 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
701 struct xfrm_state *x;
702 struct hlist_node *entry;
704 hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
705 if (x->props.family != family ||
707 x->id.proto != proto)
712 if (x->id.daddr.a4 != daddr->a4)
716 if (!ipv6_addr_equal((struct in6_addr *)daddr,
/* Lookup by (daddr, saddr, proto) in the bysrc table — used for SAs
 * addressed without an SPI (e.g. IPcomp lookups by address pair). */
730 static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
732 unsigned int h = xfrm_src_hash(daddr, saddr, family);
733 struct xfrm_state *x;
734 struct hlist_node *entry;
736 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
737 if (x->props.family != family ||
738 x->id.proto != proto)
743 if (x->id.daddr.a4 != daddr->a4 ||
744 x->props.saddr.a4 != saddr->a4)
748 if (!ipv6_addr_equal((struct in6_addr *)daddr,
751 !ipv6_addr_equal((struct in6_addr *)saddr,
/* Find a duplicate of @x: by SPI when the proto uses one, else by address. */
765 static inline struct xfrm_state *
766 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
769 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
770 x->id.proto, family);
772 return __xfrm_state_lookup_byaddr(&x->id.daddr,
774 x->id.proto, family);
/* Schedule a hash-table grow when a collision occurred, the table is below
 * hashmax, and the SA count exceeds the bucket count. */
777 static void xfrm_hash_grow_check(int have_hash_collision)
779 if (have_hash_collision &&
780 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
781 xfrm_state_num > xfrm_state_hmask)
782 schedule_work(&xfrm_hash_work);
/* Core output-path SA resolution: find the best VALID SA matching the
 * template/flow, or — if none and no ACQUIRE is pending — create an
 * ACQUIRE placeholder, query the key manager, and hash it in with an
 * expiry of sysctl_xfrm_acq_expires. Returns the SA (held) or sets *err
 * (-EAGAIN while an acquire is in progress). Many intermediate lines
 * (best tracking, error labels, spi-conflict -EEXIST path) are elided. */
786 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
787 struct flowi *fl, struct xfrm_tmpl *tmpl,
788 struct xfrm_policy *pol, int *err,
789 unsigned short family)
792 struct hlist_node *entry;
793 struct xfrm_state *x, *x0, *to_put;
794 int acquire_in_progress = 0;
796 struct xfrm_state *best = NULL;
800 spin_lock_bh(&xfrm_state_lock);
801 h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
802 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
803 if (x->props.family == family &&
804 x->props.reqid == tmpl->reqid &&
805 !(x->props.flags & XFRM_STATE_WILDRECV) &&
806 xfrm_state_addr_check(x, daddr, saddr, family) &&
807 tmpl->mode == x->props.mode &&
808 tmpl->id.proto == x->id.proto &&
809 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
811 1. There is a valid state with matching selector.
813 2. Valid state with inappropriate selector. Skip.
815 Entering area of "sysdeps".
817 3. If state is not valid, selector is temporary,
818 it selects only session which triggered
819 previous resolution. Key manager will do
820 something to install a state with proper
823 if (x->km.state == XFRM_STATE_VALID) {
824 if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
825 !security_xfrm_state_pol_flow_match(x, pol, fl))
/* Prefer the SA that is not dying, or the most recently added one. */
828 best->km.dying > x->km.dying ||
829 (best->km.dying == x->km.dying &&
830 best->curlft.add_time < x->curlft.add_time))
832 } else if (x->km.state == XFRM_STATE_ACQ) {
833 acquire_in_progress = 1;
834 } else if (x->km.state == XFRM_STATE_ERROR ||
835 x->km.state == XFRM_STATE_EXPIRED) {
836 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
837 security_xfrm_state_pol_flow_match(x, pol, fl))
844 if (!x && !error && !acquire_in_progress) {
845 (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
847 tmpl->id.proto, family)) != NULL) {
852 x = xfrm_state_alloc();
857 /* Initialize temporary selector matching only
858 * to current session. */
859 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
861 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
863 x->km.state = XFRM_STATE_DEAD;
/* Key manager accepted the query: publish the ACQUIRE in all tables. */
869 if (km_query(x, tmpl, pol) == 0) {
870 x->km.state = XFRM_STATE_ACQ;
871 list_add_tail(&x->all, &xfrm_state_all);
872 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
873 h = xfrm_src_hash(daddr, saddr, family);
874 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
876 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
877 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
879 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
880 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
881 add_timer(&x->timer);
883 xfrm_hash_grow_check(x->bydst.next != NULL);
885 x->km.state = XFRM_STATE_DEAD;
895 *err = acquire_in_progress ? -EAGAIN : error;
896 spin_unlock_bh(&xfrm_state_lock);
898 xfrm_state_put(to_put);
/* Simplified lookup of a VALID SA by (mode, proto, reqid, addresses) —
 * no template/policy/acquire handling. NOTE(review): the hold/return and
 * the rx preference logic between matches are elided in this extraction. */
903 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
904 unsigned short family, u8 mode, u8 proto, u32 reqid)
907 struct xfrm_state *rx = NULL, *x = NULL;
908 struct hlist_node *entry;
910 spin_lock(&xfrm_state_lock);
911 h = xfrm_dst_hash(daddr, saddr, reqid, family);
912 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
913 if (x->props.family == family &&
914 x->props.reqid == reqid &&
915 !(x->props.flags & XFRM_STATE_WILDRECV) &&
916 xfrm_state_addr_check(x, daddr, saddr, family) &&
917 mode == x->props.mode &&
918 proto == x->id.proto &&
919 x->km.state == XFRM_STATE_VALID) {
927 spin_unlock(&xfrm_state_lock);
932 EXPORT_SYMBOL(xfrm_stateonly_find);
/* Link a fully-configured SA into the flat list and all three hash tables
 * (byspi only when an SPI is set — the guard line is elided), bump the
 * genid, arm its timers, and trigger a table grow check. Caller holds
 * xfrm_state_lock. */
934 static void __xfrm_state_insert(struct xfrm_state *x)
938 x->genid = ++xfrm_state_genid;
940 list_add_tail(&x->all, &xfrm_state_all);
942 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
943 x->props.reqid, x->props.family);
944 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
946 h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
947 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
950 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
953 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
/* Fire the lifetime timer soon so expiries are recomputed for the new SA. */
956 mod_timer(&x->timer, jiffies + HZ);
957 if (x->replay_maxage)
958 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
964 xfrm_hash_grow_check(x->bydst.next != NULL);
967 /* xfrm_state_lock is held */
/* Invalidate cached bundles of SAs sharing (reqid, daddr, saddr) with the
 * newly inserted SA by stamping them with the current genid. */
968 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
970 unsigned short family = xnew->props.family;
971 u32 reqid = xnew->props.reqid;
972 struct xfrm_state *x;
973 struct hlist_node *entry;
976 h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
977 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
978 if (x->props.family == family &&
979 x->props.reqid == reqid &&
980 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
981 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
982 x->genid = xfrm_state_genid;
/* Public locked insert: bump genids then link the SA in. */
986 void xfrm_state_insert(struct xfrm_state *x)
988 spin_lock_bh(&xfrm_state_lock);
989 __xfrm_state_bump_genids(x);
990 __xfrm_state_insert(x);
991 spin_unlock_bh(&xfrm_state_lock);
993 EXPORT_SYMBOL(xfrm_state_insert);
995 /* xfrm_state_lock is held */
/* Find an existing ACQUIRE SA matching the key tuple; if none and @create
 * is set, allocate one with a host-specific selector (prefix /32 or /128),
 * arm its acquire-expiry timer, and hash it in. Returns the SA (hold/return
 * lines elided in this extraction). */
996 static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
998 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
999 struct hlist_node *entry;
1000 struct xfrm_state *x;
1002 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1003 if (x->props.reqid != reqid ||
1004 x->props.mode != mode ||
1005 x->props.family != family ||
1006 x->km.state != XFRM_STATE_ACQ ||
1008 x->id.proto != proto)
1013 if (x->id.daddr.a4 != daddr->a4 ||
1014 x->props.saddr.a4 != saddr->a4)
1018 if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
1019 (struct in6_addr *)daddr) ||
1020 !ipv6_addr_equal((struct in6_addr *)
1022 (struct in6_addr *)saddr))
/* No match: build a fresh ACQUIRE SA (only when @create). */
1034 x = xfrm_state_alloc();
1038 x->sel.daddr.a4 = daddr->a4;
1039 x->sel.saddr.a4 = saddr->a4;
1040 x->sel.prefixlen_d = 32;
1041 x->sel.prefixlen_s = 32;
1042 x->props.saddr.a4 = saddr->a4;
1043 x->id.daddr.a4 = daddr->a4;
1047 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
1048 (struct in6_addr *)daddr);
1049 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
1050 (struct in6_addr *)saddr);
1051 x->sel.prefixlen_d = 128;
1052 x->sel.prefixlen_s = 128;
1053 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
1054 (struct in6_addr *)saddr);
1055 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
1056 (struct in6_addr *)daddr);
1060 x->km.state = XFRM_STATE_ACQ;
1061 x->id.proto = proto;
1062 x->props.family = family;
1063 x->props.mode = mode;
1064 x->props.reqid = reqid;
1065 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
1067 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
1068 add_timer(&x->timer);
1069 list_add_tail(&x->all, &xfrm_state_all);
1070 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
1071 h = xfrm_src_hash(daddr, saddr, family);
1072 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
1076 xfrm_hash_grow_check(x->bydst.next != NULL);
1082 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
/* Add a new SA: fail (-EEXIST path elided) if an identical SA already
 * exists, otherwise absorb any matching ACQUIRE placeholder (by km.seq or
 * by tuple) into @to_put/@x1, insert the SA, and delete the placeholder
 * outside the state lock. */
1084 int xfrm_state_add(struct xfrm_state *x)
1086 struct xfrm_state *x1, *to_put;
1089 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1091 family = x->props.family;
1095 spin_lock_bh(&xfrm_state_lock);
1097 x1 = __xfrm_state_locate(x, use_spi, family);
/* A seq-matched ACQUIRE that is for a different proto/daddr is not ours. */
1105 if (use_spi && x->km.seq) {
1106 x1 = __xfrm_find_acq_byseq(x->km.seq);
1107 if (x1 && ((x1->id.proto != x->id.proto) ||
1108 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1115 x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
1117 &x->id.daddr, &x->props.saddr, 0);
1119 __xfrm_state_bump_genids(x);
1120 __xfrm_state_insert(x);
1124 spin_unlock_bh(&xfrm_state_lock);
1127 xfrm_state_delete(x1);
1132 xfrm_state_put(to_put);
1136 EXPORT_SYMBOL(xfrm_state_add);
1138 #ifdef CONFIG_XFRM_MIGRATE
/* Deep-copy an SA for migration: ids, selector, lifetimes, props,
 * algorithms (aalg/ealg/calg), NAT-T encap and care-of address, then
 * re-run xfrm_init_state on the clone. Error unwinding (goto error /
 * *errp assignment) is elided in this extraction. */
1139 static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1142 struct xfrm_state *x = xfrm_state_alloc();
1146 memcpy(&x->id, &orig->id, sizeof(x->id));
1147 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1148 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1149 x->props.mode = orig->props.mode;
1150 x->props.replay_window = orig->props.replay_window;
1151 x->props.reqid = orig->props.reqid;
1152 x->props.family = orig->props.family;
1153 x->props.saddr = orig->props.saddr;
1156 x->aalg = xfrm_algo_clone(orig->aalg);
1160 x->props.aalgo = orig->props.aalgo;
1163 x->ealg = xfrm_algo_clone(orig->ealg);
1167 x->props.ealgo = orig->props.ealgo;
1170 x->calg = xfrm_algo_clone(orig->calg);
1174 x->props.calgo = orig->props.calgo;
1177 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1183 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1189 err = xfrm_init_state(x);
1193 x->props.flags = orig->props.flags;
1195 x->curlft.add_time = orig->curlft.add_time;
1196 x->km.state = orig->km.state;
1197 x->km.seq = orig->km.seq;
1215 /* xfrm_state_lock is held */
/* Locate the SA to migrate: first search bydst by the old (daddr, saddr,
 * reqid); if the migrate request carries no reqid, fall back to a bysrc
 * scan by addresses only. Hold/return lines elided in this extraction. */
1216 struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1219 struct xfrm_state *x;
1220 struct hlist_node *entry;
1223 h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
1224 m->reqid, m->old_family);
1225 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1226 if (x->props.mode != m->mode ||
1227 x->id.proto != m->proto)
1229 if (m->reqid && x->props.reqid != m->reqid)
1231 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1233 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1240 h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
1242 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
1243 if (x->props.mode != m->mode ||
1244 x->id.proto != m->proto)
1246 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1248 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1258 EXPORT_SYMBOL(xfrm_migrate_state_find);
/* Clone @x with the new endpoint addresses and install the clone: plain
 * insert when only the source changed, full xfrm_state_add (duplicate
 * detection) when the destination — part of the SA triplet — changed. */
1260 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1261 struct xfrm_migrate *m)
1263 struct xfrm_state *xc;
1266 xc = xfrm_state_clone(x, &err);
1270 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1271 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1274 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1275 /* a care is needed when the destination address of the
1276 state is to be updated as it is a part of triplet */
1277 xfrm_state_insert(xc);
1279 if ((err = xfrm_state_add(xc)) < 0)
1288 EXPORT_SYMBOL(xfrm_state_migrate);
/* Update an existing SA in place. An ACQUIRE placeholder is replaced by
 * inserting @x and deleting the placeholder; a VALID SA has its encap,
 * care-of address, selector (non-SPI protocols only) and lifetimes copied
 * from @x under x1->lock. Error paths (-ESRCH/-EEXIST/-EACCES) are elided
 * in this extraction. */
1291 int xfrm_state_update(struct xfrm_state *x)
1293 struct xfrm_state *x1, *to_put;
1295 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1299 spin_lock_bh(&xfrm_state_lock);
1300 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
/* Kernel-internal SAs (tunnel anchors) may not be replaced from userspace. */
1306 if (xfrm_state_kern(x1)) {
1312 if (x1->km.state == XFRM_STATE_ACQ) {
1313 __xfrm_state_insert(x);
1319 spin_unlock_bh(&xfrm_state_lock);
1322 xfrm_state_put(to_put);
1328 xfrm_state_delete(x1);
1334 spin_lock_bh(&x1->lock);
1335 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1336 if (x->encap && x1->encap)
1337 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1338 if (x->coaddr && x1->coaddr) {
1339 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1341 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1342 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1343 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1346 mod_timer(&x1->timer, jiffies + HZ);
1347 if (x1->curlft.use_time)
1348 xfrm_state_check_expire(x1);
1352 spin_unlock_bh(&x1->lock);
1358 EXPORT_SYMBOL(xfrm_state_update);
/* Byte/packet lifetime check on the datapath: stamp first use time, hard
 * limits expire the SA and fire its timer immediately, soft limits (guard
 * condition partially elided) warn the key manager while the SA stays
 * usable. Return statements are elided in this extraction. */
1360 int xfrm_state_check_expire(struct xfrm_state *x)
1362 if (!x->curlft.use_time)
1363 x->curlft.use_time = get_seconds();
1365 if (x->km.state != XFRM_STATE_VALID)
1368 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1369 x->curlft.packets >= x->lft.hard_packet_limit) {
1370 x->km.state = XFRM_STATE_EXPIRED;
1371 mod_timer(&x->timer, jiffies);
1376 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1377 x->curlft.packets >= x->lft.soft_packet_limit)) {
1379 km_state_expired(x, 0, 0);
1383 EXPORT_SYMBOL(xfrm_state_check_expire);
/* Public locked wrappers around the table lookups and the acquire-core
 * helper; each takes xfrm_state_lock around the __ variant. */
1386 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1387 unsigned short family)
1389 struct xfrm_state *x;
1391 spin_lock_bh(&xfrm_state_lock);
1392 x = __xfrm_state_lookup(daddr, spi, proto, family);
1393 spin_unlock_bh(&xfrm_state_lock);
1396 EXPORT_SYMBOL(xfrm_state_lookup);
1399 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1400 u8 proto, unsigned short family)
1402 struct xfrm_state *x;
1404 spin_lock_bh(&xfrm_state_lock);
1405 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1406 spin_unlock_bh(&xfrm_state_lock);
1409 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1412 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1413 xfrm_address_t *daddr, xfrm_address_t *saddr,
1414 int create, unsigned short family)
1416 struct xfrm_state *x;
1418 spin_lock_bh(&xfrm_state_lock);
1419 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1420 spin_unlock_bh(&xfrm_state_lock);
1424 EXPORT_SYMBOL(xfrm_find_acq);
1426 #ifdef CONFIG_XFRM_SUB_POLICY
/* Sub-policy helpers: sort templates / states via the optional per-family
 * callbacks, under xfrm_state_lock. -EAFNOSUPPORT when the family has no
 * afinfo registered (NULL-check line elided in this extraction). */
1428 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1429 unsigned short family)
1432 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1434 return -EAFNOSUPPORT;
1436 spin_lock_bh(&xfrm_state_lock);
1437 if (afinfo->tmpl_sort)
1438 err = afinfo->tmpl_sort(dst, src, n);
1439 spin_unlock_bh(&xfrm_state_lock);
1440 xfrm_state_put_afinfo(afinfo);
1443 EXPORT_SYMBOL(xfrm_tmpl_sort);
1446 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1447 unsigned short family)
1450 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1452 return -EAFNOSUPPORT;
1454 spin_lock_bh(&xfrm_state_lock);
1455 if (afinfo->state_sort)
1456 err = afinfo->state_sort(dst, src, n);
1457 spin_unlock_bh(&xfrm_state_lock);
1458 xfrm_state_put_afinfo(afinfo);
1461 EXPORT_SYMBOL(xfrm_state_sort);
1464 /* Silly enough, but I'm lazy to build resolution list */
/* Linear scan of every bydst chain for an ACQUIRE SA with the given
 * key-manager sequence number. Caller holds xfrm_state_lock. */
1466 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1470 for (i = 0; i <= xfrm_state_hmask; i++) {
1471 struct hlist_node *entry;
1472 struct xfrm_state *x;
1474 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1475 if (x->km.seq == seq &&
1476 x->km.state == XFRM_STATE_ACQ) {
/* Locked wrapper for the seq lookup above. */
1485 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1487 struct xfrm_state *x;
1489 spin_lock_bh(&xfrm_state_lock);
1490 x = __xfrm_find_acq_byseq(seq);
1491 spin_unlock_bh(&xfrm_state_lock);
1494 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/* Next acquire sequence number; the ?: skips 0 on wraparound. */
1496 u32 xfrm_get_acqseq(void)
1500 static DEFINE_SPINLOCK(acqseq_lock);
1502 spin_lock_bh(&acqseq_lock);
1503 res = (++acqseq ? : ++acqseq);
1504 spin_unlock_bh(&acqseq_lock);
1507 EXPORT_SYMBOL(xfrm_get_acqseq);
/* Assign an SPI to an SA from [low, high]: exact value when low == high,
 * otherwise random probing for an unused value (each probe checked against
 * the byspi table). On success the SA is hashed into byspi. Collision puts
 * and error returns are elided in this extraction. */
1509 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1512 struct xfrm_state *x0;
1514 __be32 minspi = htonl(low);
1515 __be32 maxspi = htonl(high);
1517 spin_lock_bh(&x->lock);
1518 if (x->km.state == XFRM_STATE_DEAD)
1527 if (minspi == maxspi) {
1528 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
/* Random probe: at most (high-low+1) attempts. */
1536 for (h=0; h<high-low+1; h++) {
1537 spi = low + net_random()%(high-low+1);
1538 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1540 x->id.spi = htonl(spi);
1547 spin_lock_bh(&xfrm_state_lock);
1548 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1549 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
1550 spin_unlock_bh(&xfrm_state_lock);
1556 spin_unlock_bh(&x->lock);
1560 EXPORT_SYMBOL(xfrm_alloc_spi);
/* Iterate over xfrm_state_all, invoking @func for every live state that
 * matches walk->proto.  The walk is resumable: walk->state remembers
 * the last visited entry (pinned with an extra reference) so userspace
 * dumps can continue across calls.  The xfrm_state_walk_ongoing /
 * _completed counters fence deferred GC of states freed mid-walk. */
1562 int xfrm_state_walk(struct xfrm_state_walk *walk,
1563 int (*func)(struct xfrm_state *, int, void*),
1566 struct xfrm_state *old, *x, *last = NULL;
/* A finished walk (NULL state but nonzero count) must not restart. */
1569 if (walk->state == NULL && walk->count != 0)
1572 old = x = walk->state;
1574 spin_lock_bh(&xfrm_state_lock);
1576 x = list_first_entry(&xfrm_state_all, struct xfrm_state, all);
1577 list_for_each_entry_from(x, &xfrm_state_all, all) {
1578 if (x->km.state == XFRM_STATE_DEAD)
1580 if (!xfrm_id_proto_match(x->id.proto, walk->proto))
1583 err = func(last, walk->count, data);
1585 xfrm_state_hold(last);
1587 xfrm_state_walk_ongoing++;
/* Nothing matched at all: report that to @func via a count of 0. */
1594 if (walk->count == 0) {
1599 err = func(last, 0, data);
1601 spin_unlock_bh(&xfrm_state_lock);
1603 xfrm_state_put(old);
1604 xfrm_state_walk_completed++;
/* Kick the GC worker for states parked while the walk was running. */
1605 if (!list_empty(&xfrm_state_gc_leftovers))
1606 schedule_work(&xfrm_state_gc_work);
1610 EXPORT_SYMBOL(xfrm_state_walk);
/* Release the reference pinned by an interrupted walk and let the
 * deferred state GC catch up. */
1612 void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1614 if (walk->state != NULL) {
1615 xfrm_state_put(walk->state);
1617 xfrm_state_walk_completed++;
1618 if (!list_empty(&xfrm_state_gc_leftovers))
1619 schedule_work(&xfrm_state_gc_work);
1622 EXPORT_SYMBOL(xfrm_state_walk_done);
1625 void xfrm_replay_notify(struct xfrm_state *x, int event)
1628 /* we send notify messages in case
1629 * 1. we updated one of the sequence numbers, and the seqno difference
1630 * is at least x->replay_maxdiff, in this case we also update the
1631 * timeout of our timer function
1632 * 2. if x->replay_maxage has elapsed since last update,
1633 * and there were changes
1635 * The state structure must be locked!
1639 case XFRM_REPLAY_UPDATE:
/* Below-threshold update: defer unless a timeout is already pending. */
1640 if (x->replay_maxdiff &&
1641 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
1642 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
1643 if (x->xflags & XFRM_TIME_DEFER)
1644 event = XFRM_REPLAY_TIMEOUT;
1651 case XFRM_REPLAY_TIMEOUT:
/* No change since the last notification: just mark deferral. */
1652 if ((x->replay.seq == x->preplay.seq) &&
1653 (x->replay.bitmap == x->preplay.bitmap) &&
1654 (x->replay.oseq == x->preplay.oseq)) {
1655 x->xflags |= XFRM_TIME_DEFER;
/* Snapshot the replay counters so the next call can detect changes. */
1662 memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
1663 c.event = XFRM_MSG_NEWAE;
1664 c.data.aevent = event;
1665 km_state_notify(x, &c);
/* Re-arm the aging timer; clear deferral if it was not yet pending. */
1667 if (x->replay_maxage &&
1668 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
1669 x->xflags &= ~XFRM_TIME_DEFER;
/* rtimer callback: emit a deferred replay notification, or re-flag
 * deferral when no listener is interested (aevent off). */
1672 static void xfrm_replay_timer_handler(unsigned long data)
1674 struct xfrm_state *x = (struct xfrm_state*)data;
1676 spin_lock(&x->lock);
1678 if (x->km.state == XFRM_STATE_VALID) {
1679 if (xfrm_aevent_is_on())
1680 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1682 x->xflags |= XFRM_TIME_DEFER;
1685 spin_unlock(&x->lock);
/* Validate an inbound sequence number against the anti-replay window.
 * Failures bump stats and are audited.  Caller holds x->lock. */
1688 int xfrm_replay_check(struct xfrm_state *x,
1689 struct sk_buff *skb, __be32 net_seq)
1692 u32 seq = ntohl(net_seq);
/* Sequence number 0 is never valid on the wire. */
1694 if (unlikely(seq == 0))
/* Ahead of the window's right edge: always acceptable. */
1697 if (likely(seq > x->replay.seq))
1700 diff = x->replay.seq - seq;
/* Too far behind: outside the replay window entirely. */
1701 if (diff >= min_t(unsigned int, x->props.replay_window,
1702 sizeof(x->replay.bitmap) * 8)) {
1703 x->stats.replay_window++;
/* Bit already set: this sequence number was seen before -> replay. */
1707 if (x->replay.bitmap & (1U << diff)) {
1714 xfrm_audit_state_replay(x, skb, net_seq);
/* Record @net_seq in the replay window after a packet passed all
 * checks: slide the window forward when seq is beyond the right edge,
 * otherwise set the corresponding bit inside it.  Caller holds
 * x->lock. */
1718 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1721 u32 seq = ntohl(net_seq);
1723 if (seq > x->replay.seq) {
1724 diff = seq - x->replay.seq;
/* Shifting keeps history when the jump is inside the window width. */
1725 if (diff < x->props.replay_window)
1726 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1728 x->replay.bitmap = 1;
1729 x->replay.seq = seq;
1731 diff = x->replay.seq - seq;
1732 x->replay.bitmap |= (1U << diff);
1735 if (xfrm_aevent_is_on())
1736 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
/* Registered key managers (e.g. af_key, xfrm_user), guarded by
 * xfrm_km_lock: register/unregister take it exclusively, the
 * notification fan-out paths take it shared. */
1739 static LIST_HEAD(xfrm_km_list);
1740 static DEFINE_RWLOCK(xfrm_km_lock);
/* Broadcast a policy event to every key manager that implements
 * ->notify_policy. */
1742 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1744 struct xfrm_mgr *km;
1746 read_lock(&xfrm_km_lock);
1747 list_for_each_entry(km, &xfrm_km_list, list)
1748 if (km->notify_policy)
1749 km->notify_policy(xp, dir, c);
1750 read_unlock(&xfrm_km_lock);
/* Broadcast a state event to every registered key manager. */
1753 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1755 struct xfrm_mgr *km;
1756 read_lock(&xfrm_km_lock);
1757 list_for_each_entry(km, &xfrm_km_list, list)
1760 read_unlock(&xfrm_km_lock);
1763 EXPORT_SYMBOL(km_policy_notify);
1764 EXPORT_SYMBOL(km_state_notify);
/* Tell the key managers that state @x hit a lifetime expiry (@hard
 * distinguishes soft/hard); @pid identifies the requesting netlink
 * socket, if any. */
1766 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1772 c.event = XFRM_MSG_EXPIRE;
1773 km_state_notify(x, &c);
1779 EXPORT_SYMBOL(km_state_expired);
1781 * We send to all registered managers regardless of failure
1782 * We are happy with one success
1784 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1786 int err = -EINVAL, acqret;
1787 struct xfrm_mgr *km;
1789 read_lock(&xfrm_km_lock);
1790 list_for_each_entry(km, &xfrm_km_list, list) {
/* Ask each manager to start an ACQUIRE negotiation for this template. */
1791 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1795 read_unlock(&xfrm_km_lock);
1798 EXPORT_SYMBOL(km_query);
/* Report a NAT-T mapping change (new peer address/source port) to
 * every key manager that implements ->new_mapping. */
1800 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1803 struct xfrm_mgr *km;
1805 read_lock(&xfrm_km_lock);
1806 list_for_each_entry(km, &xfrm_km_list, list) {
1807 if (km->new_mapping)
1808 err = km->new_mapping(x, ipaddr, sport);
1812 read_unlock(&xfrm_km_lock);
1815 EXPORT_SYMBOL(km_new_mapping);
/* Notify key managers of a policy lifetime expiry (POLEXPIRE). */
1817 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1823 c.event = XFRM_MSG_POLEXPIRE;
1824 km_policy_notify(pol, dir, &c);
1829 EXPORT_SYMBOL(km_policy_expired);
1831 #ifdef CONFIG_XFRM_MIGRATE
/* Ask each key manager to carry out a MIGRATE (endpoint address
 * update) for @num_migrate address pairs matching @sel. */
1832 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1833 struct xfrm_migrate *m, int num_migrate)
1837 struct xfrm_mgr *km;
1839 read_lock(&xfrm_km_lock);
1840 list_for_each_entry(km, &xfrm_km_list, list) {
1842 ret = km->migrate(sel, dir, type, m, num_migrate);
1847 read_unlock(&xfrm_km_lock);
1850 EXPORT_SYMBOL(km_migrate);
/* Forward an xfrm report event (proto/selector/address) to every key
 * manager implementing ->report. */
1853 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1857 struct xfrm_mgr *km;
1859 read_lock(&xfrm_km_lock);
1860 list_for_each_entry(km, &xfrm_km_list, list) {
1862 ret = km->report(proto, sel, addr);
1867 read_unlock(&xfrm_km_lock);
1870 EXPORT_SYMBOL(km_report);
/* setsockopt() helper: copy a user-supplied policy blob into the
 * kernel, let a key manager compile it into an xfrm_policy, then
 * attach the result to @sk.  @optlen is bounded by PAGE_SIZE to cap
 * the kmalloc. */
1872 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1876 struct xfrm_mgr *km;
1877 struct xfrm_policy *pol = NULL;
1879 if (optlen <= 0 || optlen > PAGE_SIZE)
1882 data = kmalloc(optlen, GFP_KERNEL);
1887 if (copy_from_user(data, optval, optlen))
1891 read_lock(&xfrm_km_lock);
/* First manager whose compile_policy() succeeds wins. */
1892 list_for_each_entry(km, &xfrm_km_list, list) {
1893 pol = km->compile_policy(sk, optname, data,
1898 read_unlock(&xfrm_km_lock);
1901 xfrm_sk_policy_insert(sk, err, pol);
1910 EXPORT_SYMBOL(xfrm_user_policy);
/* Register a key manager for event fan-out (km_* broadcast paths). */
1912 int xfrm_register_km(struct xfrm_mgr *km)
1914 write_lock_bh(&xfrm_km_lock);
1915 list_add_tail(&km->list, &xfrm_km_list);
1916 write_unlock_bh(&xfrm_km_lock);
1919 EXPORT_SYMBOL(xfrm_register_km);
/* Remove a key manager from the broadcast list. */
1921 int xfrm_unregister_km(struct xfrm_mgr *km)
1923 write_lock_bh(&xfrm_km_lock);
1924 list_del(&km->list);
1925 write_unlock_bh(&xfrm_km_lock);
1928 EXPORT_SYMBOL(xfrm_unregister_km);
/* Register per-address-family state operations.  Rejects a NULL
 * afinfo, an out-of-range family, or a family slot that is already
 * occupied (the error assignment for that case is among the elided
 * lines). */
1930 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1933 if (unlikely(afinfo == NULL))
1935 if (unlikely(afinfo->family >= NPROTO))
1936 return -EAFNOSUPPORT;
1937 write_lock_bh(&xfrm_state_afinfo_lock);
1938 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1941 xfrm_state_afinfo[afinfo->family] = afinfo;
1942 write_unlock_bh(&xfrm_state_afinfo_lock);
1945 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/* Unregister per-family state operations; only succeeds when the slot
 * actually holds @afinfo (guards against mismatched unregister). */
1947 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1950 if (unlikely(afinfo == NULL))
1952 if (unlikely(afinfo->family >= NPROTO))
1953 return -EAFNOSUPPORT;
1954 write_lock_bh(&xfrm_state_afinfo_lock);
1955 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1956 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1959 xfrm_state_afinfo[afinfo->family] = NULL;
1961 write_unlock_bh(&xfrm_state_afinfo_lock);
1964 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/* Look up the per-family ops table; on success returns with
 * xfrm_state_afinfo_lock read-held -- released later by
 * xfrm_state_put_afinfo(). */
1966 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1968 struct xfrm_state_afinfo *afinfo;
1969 if (unlikely(family >= NPROTO))
1971 read_lock(&xfrm_state_afinfo_lock);
1972 afinfo = xfrm_state_afinfo[family];
/* Drop the lock again when the family has no registered ops. */
1973 if (unlikely(!afinfo))
1974 read_unlock(&xfrm_state_afinfo_lock);
/* Pairs with xfrm_state_get_afinfo(): releases the afinfo read lock. */
1978 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1979 __releases(xfrm_state_afinfo_lock)
1981 read_unlock(&xfrm_state_afinfo_lock);
1984 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Drop @x's hold on its inner tunnel state: delete the tunnel state
 * when we are its last user besides its own linkage (tunnel_users ==
 * 2), then decrement the user count. */
1985 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1988 struct xfrm_state *t = x->tunnel;
1990 if (atomic_read(&t->tunnel_users) == 2)
1991 xfrm_state_delete(t);
1992 atomic_dec(&t->tunnel_users);
1997 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
/* Effective payload MTU through state @x: ask the transform type when
 * it provides get_mtu(), otherwise subtract the header overhead. */
1999 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
2003 spin_lock_bh(&x->lock);
2004 if (x->km.state == XFRM_STATE_VALID &&
2005 x->type && x->type->get_mtu)
2006 res = x->type->get_mtu(x, mtu);
2008 res = mtu - x->props.header_len;
2009 spin_unlock_bh(&x->lock);
/* Finish constructing @x: apply per-family init flags, resolve the
 * inner/outer xfrm_modes, bind the transform type, and mark the state
 * VALID.  An AF_UNSPEC selector means inter-address-family operation,
 * so both an AF_INET and an AF_INET6 inner mode are resolved and
 * assigned according to the outer family. */
2013 int xfrm_init_state(struct xfrm_state *x)
2015 struct xfrm_state_afinfo *afinfo;
2016 struct xfrm_mode *inner_mode;
2017 int family = x->props.family;
2020 err = -EAFNOSUPPORT;
2021 afinfo = xfrm_state_get_afinfo(family);
2026 if (afinfo->init_flags)
2027 err = afinfo->init_flags(x);
2029 xfrm_state_put_afinfo(afinfo);
2034 err = -EPROTONOSUPPORT;
2036 if (x->sel.family != AF_UNSPEC) {
2037 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2038 if (inner_mode == NULL)
/* A non-tunnel mode cannot bridge differing inner/outer families. */
2041 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2042 family != x->sel.family) {
2043 xfrm_put_mode(inner_mode);
2047 x->inner_mode = inner_mode;
2049 struct xfrm_mode *inner_mode_iaf;
/* Inter-family case: both v4 and v6 inner modes must exist and be
 * tunnel modes. */
2051 inner_mode = xfrm_get_mode(x->props.mode, AF_INET);
2052 if (inner_mode == NULL)
2055 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
2056 xfrm_put_mode(inner_mode);
2060 inner_mode_iaf = xfrm_get_mode(x->props.mode, AF_INET6);
2061 if (inner_mode_iaf == NULL)
2064 if (!(inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)) {
2065 xfrm_put_mode(inner_mode_iaf);
/* Same-family mode goes in inner_mode, the other family's mode in
 * inner_mode_iaf. */
2069 if (x->props.family == AF_INET) {
2070 x->inner_mode = inner_mode;
2071 x->inner_mode_iaf = inner_mode_iaf;
2073 x->inner_mode = inner_mode_iaf;
2074 x->inner_mode_iaf = inner_mode;
2078 x->type = xfrm_get_type(x->id.proto, family);
2079 if (x->type == NULL)
2082 err = x->type->init_state(x);
2086 x->outer_mode = xfrm_get_mode(x->props.mode, family);
2087 if (x->outer_mode == NULL)
2090 x->km.state = XFRM_STATE_VALID;
2096 EXPORT_SYMBOL(xfrm_init_state);
/* Boot-time setup: allocate the three initial 8-bucket state hash
 * tables (bydst/bysrc/byspi) and arm the state GC work item.  Panics
 * on allocation failure since the stack cannot run without them. */
2098 void __init xfrm_state_init(void)
2102 sz = sizeof(struct hlist_head) * 8;
2104 xfrm_state_bydst = xfrm_hash_alloc(sz);
2105 xfrm_state_bysrc = xfrm_hash_alloc(sz);
2106 xfrm_state_byspi = xfrm_hash_alloc(sz);
2107 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
2108 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
2109 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2111 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
2114 #ifdef CONFIG_AUDITSYSCALL
/* Append SA identity -- security context (when present), src/dst
 * addresses per family, and SPI -- to an audit record. */
2115 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2116 struct audit_buffer *audit_buf)
2118 struct xfrm_sec_ctx *ctx = x->security;
2119 u32 spi = ntohl(x->id.spi);
2122 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2123 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2125 switch(x->props.family) {
2127 audit_log_format(audit_buf,
2128 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2129 NIPQUAD(x->props.saddr.a4),
2130 NIPQUAD(x->id.daddr.a4));
2133 audit_log_format(audit_buf,
2134 " src=" NIP6_FMT " dst=" NIP6_FMT,
2135 NIP6(*(struct in6_addr *)x->props.saddr.a6),
2136 NIP6(*(struct in6_addr *)x->id.daddr.a6));
2140 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
/* Append the packet's src/dst (and, for IPv6, the flow label) from its
 * IP header to an audit record, keyed on @family. */
2143 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2144 struct audit_buffer *audit_buf)
2147 struct ipv6hdr *iph6;
2152 audit_log_format(audit_buf,
2153 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2154 NIPQUAD(iph4->saddr),
2155 NIPQUAD(iph4->daddr));
2158 iph6 = ipv6_hdr(skb);
2159 audit_log_format(audit_buf,
2160 " src=" NIP6_FMT " dst=" NIP6_FMT
2161 " flowlbl=0x%x%02x%02x",
/* Low nibble of flow_lbl[0]: upper 4 bits belong to traffic class. */
2164 iph6->flow_lbl[0] & 0x0f,
/* Audit an SA add attempt (SAD-add): who did it, which SA, and whether
 * it succeeded (@result). */
2171 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2172 uid_t auid, u32 sessionid, u32 secid)
2174 struct audit_buffer *audit_buf;
2176 audit_buf = xfrm_audit_start("SAD-add");
2177 if (audit_buf == NULL)
2179 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2180 xfrm_audit_helper_sainfo(x, audit_buf);
2181 audit_log_format(audit_buf, " res=%u", result);
2182 audit_log_end(audit_buf);
2184 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
/* Audit an SA delete attempt (SAD-delete); mirrors
 * xfrm_audit_state_add(). */
2186 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2187 uid_t auid, u32 sessionid, u32 secid)
2189 struct audit_buffer *audit_buf;
2191 audit_buf = xfrm_audit_start("SAD-delete");
2192 if (audit_buf == NULL)
2194 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2195 xfrm_audit_helper_sainfo(x, audit_buf);
2196 audit_log_format(audit_buf, " res=%u", result);
2197 audit_log_end(audit_buf);
2199 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
/* Audit an outbound sequence-number overflow on SA @x. */
2201 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2202 struct sk_buff *skb)
2204 struct audit_buffer *audit_buf;
2207 audit_buf = xfrm_audit_start("SA-replay-overflow");
2208 if (audit_buf == NULL)
2210 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2211 /* don't record the sequence number because it's inherent in this kind
2212 * of audit message */
2213 spi = ntohl(x->id.spi);
2214 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2215 audit_log_end(audit_buf);
2217 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
/* Audit a replayed inbound packet (called from xfrm_replay_check()
 * when a sequence number is seen twice). */
2219 static void xfrm_audit_state_replay(struct xfrm_state *x,
2220 struct sk_buff *skb, __be32 net_seq)
2222 struct audit_buffer *audit_buf;
2225 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2226 if (audit_buf == NULL)
2228 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2229 spi = ntohl(x->id.spi);
2230 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2231 spi, spi, ntohl(net_seq));
2232 audit_log_end(audit_buf);
/* Audit an SA lookup miss when no SPI/seq is available (hence
 * "simple": packet info only). */
2235 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2237 struct audit_buffer *audit_buf;
2239 audit_buf = xfrm_audit_start("SA-notfound");
2240 if (audit_buf == NULL)
2242 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2243 audit_log_end(audit_buf);
2245 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
/* Audit an SA lookup miss, including the wire SPI and sequence number
 * of the offending packet. */
2247 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2248 __be32 net_spi, __be32 net_seq)
2250 struct audit_buffer *audit_buf;
2253 audit_buf = xfrm_audit_start("SA-notfound");
2254 if (audit_buf == NULL)
2256 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2257 spi = ntohl(net_spi);
2258 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2259 spi, spi, ntohl(net_seq));
2260 audit_log_end(audit_buf);
2262 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
/* Audit an integrity-check (ICV) failure on SA @x; SPI/seq are logged
 * only if they can still be parsed out of the mangled packet. */
2264 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2265 struct sk_buff *skb, u8 proto)
2267 struct audit_buffer *audit_buf;
2271 audit_buf = xfrm_audit_start("SA-icv-failure");
2272 if (audit_buf == NULL)
2274 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2275 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2276 u32 spi = ntohl(net_spi);
2277 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2278 spi, spi, ntohl(net_seq));
2280 audit_log_end(audit_buf);
2282 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2283 #endif /* CONFIG_AUDITSYSCALL */