6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
25 #include "xfrm_hash.h"
28 EXPORT_SYMBOL(xfrm_nl);
30 u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
31 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
33 u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
34 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
36 u32 sysctl_xfrm_acq_expires __read_mostly = 30;
38 /* Each xfrm_state may be linked to two tables:
40 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
41 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
42 destination/tunnel endpoint. (output)
45 static DEFINE_SPINLOCK(xfrm_state_lock);
47 /* Hash table to find appropriate SA towards given target (endpoint
48 * of tunnel or destination of transport mode) allowed by selector.
50 * Main use is finding SA after policy selected tunnel or transport mode.
51 * Also, it can be used by ah/esp icmp error handler to find offending SA.
53 static LIST_HEAD(xfrm_state_all);
54 static struct hlist_head *xfrm_state_bydst __read_mostly;
55 static struct hlist_head *xfrm_state_bysrc __read_mostly;
56 static struct hlist_head *xfrm_state_byspi __read_mostly;
57 static unsigned int xfrm_state_hmask __read_mostly;
58 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
59 static unsigned int xfrm_state_num;
60 static unsigned int xfrm_state_genid;
62 /* Counter indicating ongoing walk, protected by xfrm_state_lock. */
63 static unsigned long xfrm_state_walk_ongoing;
64 /* Counter indicating walk completion, protected by xfrm_cfg_mutex. */
65 static unsigned long xfrm_state_walk_completed;
67 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
68 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
70 #ifdef CONFIG_AUDITSYSCALL
71 static void xfrm_audit_state_replay(struct xfrm_state *x,
72 struct sk_buff *skb, __be32 net_seq);
74 #define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
75 #endif /* CONFIG_AUDITSYSCALL */
77 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
78 xfrm_address_t *saddr,
80 unsigned short family)
82 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
85 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
86 xfrm_address_t *saddr,
87 unsigned short family)
89 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
92 static inline unsigned int
93 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
95 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
98 static void xfrm_hash_transfer(struct hlist_head *list,
99 struct hlist_head *ndsttable,
100 struct hlist_head *nsrctable,
101 struct hlist_head *nspitable,
102 unsigned int nhashmask)
104 struct hlist_node *entry, *tmp;
105 struct xfrm_state *x;
107 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
110 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
111 x->props.reqid, x->props.family,
113 hlist_add_head(&x->bydst, ndsttable+h);
115 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
118 hlist_add_head(&x->bysrc, nsrctable+h);
121 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
122 x->id.proto, x->props.family,
124 hlist_add_head(&x->byspi, nspitable+h);
129 static unsigned long xfrm_hash_new_size(void)
131 return ((xfrm_state_hmask + 1) << 1) *
132 sizeof(struct hlist_head);
135 static DEFINE_MUTEX(hash_resize_mutex);
137 static void xfrm_hash_resize(struct work_struct *__unused)
139 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
140 unsigned long nsize, osize;
141 unsigned int nhashmask, ohashmask;
144 mutex_lock(&hash_resize_mutex);
146 nsize = xfrm_hash_new_size();
147 ndst = xfrm_hash_alloc(nsize);
150 nsrc = xfrm_hash_alloc(nsize);
152 xfrm_hash_free(ndst, nsize);
155 nspi = xfrm_hash_alloc(nsize);
157 xfrm_hash_free(ndst, nsize);
158 xfrm_hash_free(nsrc, nsize);
162 spin_lock_bh(&xfrm_state_lock);
164 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
165 for (i = xfrm_state_hmask; i >= 0; i--)
166 xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
169 odst = xfrm_state_bydst;
170 osrc = xfrm_state_bysrc;
171 ospi = xfrm_state_byspi;
172 ohashmask = xfrm_state_hmask;
174 xfrm_state_bydst = ndst;
175 xfrm_state_bysrc = nsrc;
176 xfrm_state_byspi = nspi;
177 xfrm_state_hmask = nhashmask;
179 spin_unlock_bh(&xfrm_state_lock);
181 osize = (ohashmask + 1) * sizeof(struct hlist_head);
182 xfrm_hash_free(odst, osize);
183 xfrm_hash_free(osrc, osize);
184 xfrm_hash_free(ospi, osize);
187 mutex_unlock(&hash_resize_mutex);
190 static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
192 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
193 EXPORT_SYMBOL(km_waitq);
195 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
196 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
198 static struct work_struct xfrm_state_gc_work;
199 static LIST_HEAD(xfrm_state_gc_leftovers);
200 static LIST_HEAD(xfrm_state_gc_list);
201 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
203 int __xfrm_state_delete(struct xfrm_state *x);
205 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
206 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
208 static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
210 struct xfrm_state_afinfo *afinfo;
211 if (unlikely(family >= NPROTO))
213 write_lock_bh(&xfrm_state_afinfo_lock);
214 afinfo = xfrm_state_afinfo[family];
215 if (unlikely(!afinfo))
216 write_unlock_bh(&xfrm_state_afinfo_lock);
220 static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
221 __releases(xfrm_state_afinfo_lock)
223 write_unlock_bh(&xfrm_state_afinfo_lock);
226 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
228 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
229 const struct xfrm_type **typemap;
232 if (unlikely(afinfo == NULL))
233 return -EAFNOSUPPORT;
234 typemap = afinfo->type_map;
236 if (likely(typemap[type->proto] == NULL))
237 typemap[type->proto] = type;
240 xfrm_state_unlock_afinfo(afinfo);
243 EXPORT_SYMBOL(xfrm_register_type);
245 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
247 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
248 const struct xfrm_type **typemap;
251 if (unlikely(afinfo == NULL))
252 return -EAFNOSUPPORT;
253 typemap = afinfo->type_map;
255 if (unlikely(typemap[type->proto] != type))
258 typemap[type->proto] = NULL;
259 xfrm_state_unlock_afinfo(afinfo);
262 EXPORT_SYMBOL(xfrm_unregister_type);
264 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
266 struct xfrm_state_afinfo *afinfo;
267 const struct xfrm_type **typemap;
268 const struct xfrm_type *type;
269 int modload_attempted = 0;
272 afinfo = xfrm_state_get_afinfo(family);
273 if (unlikely(afinfo == NULL))
275 typemap = afinfo->type_map;
277 type = typemap[proto];
278 if (unlikely(type && !try_module_get(type->owner)))
280 if (!type && !modload_attempted) {
281 xfrm_state_put_afinfo(afinfo);
282 request_module("xfrm-type-%d-%d", family, proto);
283 modload_attempted = 1;
287 xfrm_state_put_afinfo(afinfo);
291 static void xfrm_put_type(const struct xfrm_type *type)
293 module_put(type->owner);
296 int xfrm_register_mode(struct xfrm_mode *mode, int family)
298 struct xfrm_state_afinfo *afinfo;
299 struct xfrm_mode **modemap;
302 if (unlikely(mode->encap >= XFRM_MODE_MAX))
305 afinfo = xfrm_state_lock_afinfo(family);
306 if (unlikely(afinfo == NULL))
307 return -EAFNOSUPPORT;
310 modemap = afinfo->mode_map;
311 if (modemap[mode->encap])
315 if (!try_module_get(afinfo->owner))
318 mode->afinfo = afinfo;
319 modemap[mode->encap] = mode;
323 xfrm_state_unlock_afinfo(afinfo);
326 EXPORT_SYMBOL(xfrm_register_mode);
328 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
330 struct xfrm_state_afinfo *afinfo;
331 struct xfrm_mode **modemap;
334 if (unlikely(mode->encap >= XFRM_MODE_MAX))
337 afinfo = xfrm_state_lock_afinfo(family);
338 if (unlikely(afinfo == NULL))
339 return -EAFNOSUPPORT;
342 modemap = afinfo->mode_map;
343 if (likely(modemap[mode->encap] == mode)) {
344 modemap[mode->encap] = NULL;
345 module_put(mode->afinfo->owner);
349 xfrm_state_unlock_afinfo(afinfo);
352 EXPORT_SYMBOL(xfrm_unregister_mode);
354 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
356 struct xfrm_state_afinfo *afinfo;
357 struct xfrm_mode *mode;
358 int modload_attempted = 0;
360 if (unlikely(encap >= XFRM_MODE_MAX))
364 afinfo = xfrm_state_get_afinfo(family);
365 if (unlikely(afinfo == NULL))
368 mode = afinfo->mode_map[encap];
369 if (unlikely(mode && !try_module_get(mode->owner)))
371 if (!mode && !modload_attempted) {
372 xfrm_state_put_afinfo(afinfo);
373 request_module("xfrm-mode-%d-%d", family, encap);
374 modload_attempted = 1;
378 xfrm_state_put_afinfo(afinfo);
382 static void xfrm_put_mode(struct xfrm_mode *mode)
384 module_put(mode->owner);
387 static void xfrm_state_gc_destroy(struct xfrm_state *x)
389 del_timer_sync(&x->timer);
390 del_timer_sync(&x->rtimer);
397 xfrm_put_mode(x->inner_mode);
398 if (x->inner_mode_iaf)
399 xfrm_put_mode(x->inner_mode_iaf);
401 xfrm_put_mode(x->outer_mode);
403 x->type->destructor(x);
404 xfrm_put_type(x->type);
406 security_xfrm_state_free(x);
410 static void xfrm_state_gc_task(struct work_struct *data)
412 struct xfrm_state *x, *tmp;
413 unsigned long completed;
415 mutex_lock(&xfrm_cfg_mutex);
416 spin_lock_bh(&xfrm_state_gc_lock);
417 list_splice_tail_init(&xfrm_state_gc_list, &xfrm_state_gc_leftovers);
418 spin_unlock_bh(&xfrm_state_gc_lock);
420 completed = xfrm_state_walk_completed;
421 mutex_unlock(&xfrm_cfg_mutex);
423 list_for_each_entry_safe(x, tmp, &xfrm_state_gc_leftovers, gclist) {
424 if ((long)(x->lastused - completed) > 0)
426 xfrm_state_gc_destroy(x);
432 static inline unsigned long make_jiffies(long secs)
434 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
435 return MAX_SCHEDULE_TIMEOUT-1;
440 static void xfrm_timer_handler(unsigned long data)
442 struct xfrm_state *x = (struct xfrm_state*)data;
443 unsigned long now = get_seconds();
444 long next = LONG_MAX;
449 if (x->km.state == XFRM_STATE_DEAD)
451 if (x->km.state == XFRM_STATE_EXPIRED)
453 if (x->lft.hard_add_expires_seconds) {
454 long tmo = x->lft.hard_add_expires_seconds +
455 x->curlft.add_time - now;
461 if (x->lft.hard_use_expires_seconds) {
462 long tmo = x->lft.hard_use_expires_seconds +
463 (x->curlft.use_time ? : now) - now;
471 if (x->lft.soft_add_expires_seconds) {
472 long tmo = x->lft.soft_add_expires_seconds +
473 x->curlft.add_time - now;
479 if (x->lft.soft_use_expires_seconds) {
480 long tmo = x->lft.soft_use_expires_seconds +
481 (x->curlft.use_time ? : now) - now;
490 km_state_expired(x, 0, 0);
492 if (next != LONG_MAX)
493 mod_timer(&x->timer, jiffies + make_jiffies(next));
498 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
499 x->km.state = XFRM_STATE_EXPIRED;
505 err = __xfrm_state_delete(x);
506 if (!err && x->id.spi)
507 km_state_expired(x, 1, 0);
509 xfrm_audit_state_delete(x, err ? 0 : 1,
510 audit_get_loginuid(current),
511 audit_get_sessionid(current), 0);
514 spin_unlock(&x->lock);
517 static void xfrm_replay_timer_handler(unsigned long data);
519 struct xfrm_state *xfrm_state_alloc(void)
521 struct xfrm_state *x;
523 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
526 atomic_set(&x->refcnt, 1);
527 atomic_set(&x->tunnel_users, 0);
528 INIT_LIST_HEAD(&x->all);
529 INIT_HLIST_NODE(&x->bydst);
530 INIT_HLIST_NODE(&x->bysrc);
531 INIT_HLIST_NODE(&x->byspi);
532 setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
533 setup_timer(&x->rtimer, xfrm_replay_timer_handler,
535 x->curlft.add_time = get_seconds();
536 x->lft.soft_byte_limit = XFRM_INF;
537 x->lft.soft_packet_limit = XFRM_INF;
538 x->lft.hard_byte_limit = XFRM_INF;
539 x->lft.hard_packet_limit = XFRM_INF;
540 x->replay_maxage = 0;
541 x->replay_maxdiff = 0;
542 x->inner_mode = NULL;
543 x->inner_mode_iaf = NULL;
544 spin_lock_init(&x->lock);
548 EXPORT_SYMBOL(xfrm_state_alloc);
550 void __xfrm_state_destroy(struct xfrm_state *x)
552 WARN_ON(x->km.state != XFRM_STATE_DEAD);
554 spin_lock_bh(&xfrm_state_gc_lock);
555 list_add_tail(&x->gclist, &xfrm_state_gc_list);
556 spin_unlock_bh(&xfrm_state_gc_lock);
557 schedule_work(&xfrm_state_gc_work);
559 EXPORT_SYMBOL(__xfrm_state_destroy);
561 int __xfrm_state_delete(struct xfrm_state *x)
565 if (x->km.state != XFRM_STATE_DEAD) {
566 x->km.state = XFRM_STATE_DEAD;
567 spin_lock(&xfrm_state_lock);
568 x->lastused = xfrm_state_walk_ongoing;
569 list_del_rcu(&x->all);
570 hlist_del(&x->bydst);
571 hlist_del(&x->bysrc);
573 hlist_del(&x->byspi);
575 spin_unlock(&xfrm_state_lock);
577 /* All xfrm_state objects are created by xfrm_state_alloc.
578 * The xfrm_state_alloc call gives a reference, and that
579 * is what we are dropping here.
587 EXPORT_SYMBOL(__xfrm_state_delete);
589 int xfrm_state_delete(struct xfrm_state *x)
593 spin_lock_bh(&x->lock);
594 err = __xfrm_state_delete(x);
595 spin_unlock_bh(&x->lock);
599 EXPORT_SYMBOL(xfrm_state_delete);
601 #ifdef CONFIG_SECURITY_NETWORK_XFRM
603 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
607 for (i = 0; i <= xfrm_state_hmask; i++) {
608 struct hlist_node *entry;
609 struct xfrm_state *x;
611 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
612 if (xfrm_id_proto_match(x->id.proto, proto) &&
613 (err = security_xfrm_state_delete(x)) != 0) {
614 xfrm_audit_state_delete(x, 0,
615 audit_info->loginuid,
616 audit_info->sessionid,
627 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
633 int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
637 spin_lock_bh(&xfrm_state_lock);
638 err = xfrm_state_flush_secctx_check(proto, audit_info);
642 for (i = 0; i <= xfrm_state_hmask; i++) {
643 struct hlist_node *entry;
644 struct xfrm_state *x;
646 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
647 if (!xfrm_state_kern(x) &&
648 xfrm_id_proto_match(x->id.proto, proto)) {
650 spin_unlock_bh(&xfrm_state_lock);
652 err = xfrm_state_delete(x);
653 xfrm_audit_state_delete(x, err ? 0 : 1,
654 audit_info->loginuid,
655 audit_info->sessionid,
659 spin_lock_bh(&xfrm_state_lock);
667 spin_unlock_bh(&xfrm_state_lock);
671 EXPORT_SYMBOL(xfrm_state_flush);
673 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
675 spin_lock_bh(&xfrm_state_lock);
676 si->sadcnt = xfrm_state_num;
677 si->sadhcnt = xfrm_state_hmask;
678 si->sadhmcnt = xfrm_state_hashmax;
679 spin_unlock_bh(&xfrm_state_lock);
681 EXPORT_SYMBOL(xfrm_sad_getinfo);
684 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
685 struct xfrm_tmpl *tmpl,
686 xfrm_address_t *daddr, xfrm_address_t *saddr,
687 unsigned short family)
689 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
692 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
693 xfrm_state_put_afinfo(afinfo);
697 static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
699 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
700 struct xfrm_state *x;
701 struct hlist_node *entry;
703 hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
704 if (x->props.family != family ||
706 x->id.proto != proto)
711 if (x->id.daddr.a4 != daddr->a4)
715 if (!ipv6_addr_equal((struct in6_addr *)daddr,
729 static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
731 unsigned int h = xfrm_src_hash(daddr, saddr, family);
732 struct xfrm_state *x;
733 struct hlist_node *entry;
735 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
736 if (x->props.family != family ||
737 x->id.proto != proto)
742 if (x->id.daddr.a4 != daddr->a4 ||
743 x->props.saddr.a4 != saddr->a4)
747 if (!ipv6_addr_equal((struct in6_addr *)daddr,
750 !ipv6_addr_equal((struct in6_addr *)saddr,
764 static inline struct xfrm_state *
765 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
768 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
769 x->id.proto, family);
771 return __xfrm_state_lookup_byaddr(&x->id.daddr,
773 x->id.proto, family);
776 static void xfrm_hash_grow_check(int have_hash_collision)
778 if (have_hash_collision &&
779 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
780 xfrm_state_num > xfrm_state_hmask)
781 schedule_work(&xfrm_hash_work);
785 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
786 struct flowi *fl, struct xfrm_tmpl *tmpl,
787 struct xfrm_policy *pol, int *err,
788 unsigned short family)
791 struct hlist_node *entry;
792 struct xfrm_state *x, *x0, *to_put;
793 int acquire_in_progress = 0;
795 struct xfrm_state *best = NULL;
799 spin_lock_bh(&xfrm_state_lock);
800 h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
801 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
802 if (x->props.family == family &&
803 x->props.reqid == tmpl->reqid &&
804 !(x->props.flags & XFRM_STATE_WILDRECV) &&
805 xfrm_state_addr_check(x, daddr, saddr, family) &&
806 tmpl->mode == x->props.mode &&
807 tmpl->id.proto == x->id.proto &&
808 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
810 1. There is a valid state with matching selector.
812 2. Valid state with inappropriate selector. Skip.
814 Entering area of "sysdeps".
816 3. If state is not valid, selector is temporary,
817 it selects only session which triggered
818 previous resolution. Key manager will do
819 something to install a state with proper
822 if (x->km.state == XFRM_STATE_VALID) {
823 if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
824 !security_xfrm_state_pol_flow_match(x, pol, fl))
827 best->km.dying > x->km.dying ||
828 (best->km.dying == x->km.dying &&
829 best->curlft.add_time < x->curlft.add_time))
831 } else if (x->km.state == XFRM_STATE_ACQ) {
832 acquire_in_progress = 1;
833 } else if (x->km.state == XFRM_STATE_ERROR ||
834 x->km.state == XFRM_STATE_EXPIRED) {
835 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
836 security_xfrm_state_pol_flow_match(x, pol, fl))
843 if (!x && !error && !acquire_in_progress) {
845 (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
846 tmpl->id.proto, family)) != NULL) {
851 x = xfrm_state_alloc();
856 /* Initialize temporary selector matching only
857 * to current session. */
858 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
860 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
862 x->km.state = XFRM_STATE_DEAD;
868 if (km_query(x, tmpl, pol) == 0) {
869 x->km.state = XFRM_STATE_ACQ;
870 list_add_tail(&x->all, &xfrm_state_all);
871 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
872 h = xfrm_src_hash(daddr, saddr, family);
873 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
875 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
876 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
878 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
879 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
880 add_timer(&x->timer);
882 xfrm_hash_grow_check(x->bydst.next != NULL);
884 x->km.state = XFRM_STATE_DEAD;
894 *err = acquire_in_progress ? -EAGAIN : error;
895 spin_unlock_bh(&xfrm_state_lock);
897 xfrm_state_put(to_put);
902 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
903 unsigned short family, u8 mode, u8 proto, u32 reqid)
906 struct xfrm_state *rx = NULL, *x = NULL;
907 struct hlist_node *entry;
909 spin_lock(&xfrm_state_lock);
910 h = xfrm_dst_hash(daddr, saddr, reqid, family);
911 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
912 if (x->props.family == family &&
913 x->props.reqid == reqid &&
914 !(x->props.flags & XFRM_STATE_WILDRECV) &&
915 xfrm_state_addr_check(x, daddr, saddr, family) &&
916 mode == x->props.mode &&
917 proto == x->id.proto &&
918 x->km.state == XFRM_STATE_VALID) {
926 spin_unlock(&xfrm_state_lock);
931 EXPORT_SYMBOL(xfrm_stateonly_find);
933 static void __xfrm_state_insert(struct xfrm_state *x)
937 x->genid = ++xfrm_state_genid;
939 list_add_tail(&x->all, &xfrm_state_all);
941 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
942 x->props.reqid, x->props.family);
943 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
945 h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
946 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
949 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
952 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
955 mod_timer(&x->timer, jiffies + HZ);
956 if (x->replay_maxage)
957 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
963 xfrm_hash_grow_check(x->bydst.next != NULL);
966 /* xfrm_state_lock is held */
967 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
969 unsigned short family = xnew->props.family;
970 u32 reqid = xnew->props.reqid;
971 struct xfrm_state *x;
972 struct hlist_node *entry;
975 h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
976 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
977 if (x->props.family == family &&
978 x->props.reqid == reqid &&
979 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
980 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
981 x->genid = xfrm_state_genid;
985 void xfrm_state_insert(struct xfrm_state *x)
987 spin_lock_bh(&xfrm_state_lock);
988 __xfrm_state_bump_genids(x);
989 __xfrm_state_insert(x);
990 spin_unlock_bh(&xfrm_state_lock);
992 EXPORT_SYMBOL(xfrm_state_insert);
994 /* xfrm_state_lock is held */
995 static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
997 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
998 struct hlist_node *entry;
999 struct xfrm_state *x;
1001 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1002 if (x->props.reqid != reqid ||
1003 x->props.mode != mode ||
1004 x->props.family != family ||
1005 x->km.state != XFRM_STATE_ACQ ||
1007 x->id.proto != proto)
1012 if (x->id.daddr.a4 != daddr->a4 ||
1013 x->props.saddr.a4 != saddr->a4)
1017 if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
1018 (struct in6_addr *)daddr) ||
1019 !ipv6_addr_equal((struct in6_addr *)
1021 (struct in6_addr *)saddr))
1033 x = xfrm_state_alloc();
1037 x->sel.daddr.a4 = daddr->a4;
1038 x->sel.saddr.a4 = saddr->a4;
1039 x->sel.prefixlen_d = 32;
1040 x->sel.prefixlen_s = 32;
1041 x->props.saddr.a4 = saddr->a4;
1042 x->id.daddr.a4 = daddr->a4;
1046 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
1047 (struct in6_addr *)daddr);
1048 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
1049 (struct in6_addr *)saddr);
1050 x->sel.prefixlen_d = 128;
1051 x->sel.prefixlen_s = 128;
1052 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
1053 (struct in6_addr *)saddr);
1054 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
1055 (struct in6_addr *)daddr);
1059 x->km.state = XFRM_STATE_ACQ;
1060 x->id.proto = proto;
1061 x->props.family = family;
1062 x->props.mode = mode;
1063 x->props.reqid = reqid;
1064 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
1066 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
1067 add_timer(&x->timer);
1068 list_add_tail(&x->all, &xfrm_state_all);
1069 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
1070 h = xfrm_src_hash(daddr, saddr, family);
1071 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
1075 xfrm_hash_grow_check(x->bydst.next != NULL);
1081 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
1083 int xfrm_state_add(struct xfrm_state *x)
1085 struct xfrm_state *x1, *to_put;
1088 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1090 family = x->props.family;
1094 spin_lock_bh(&xfrm_state_lock);
1096 x1 = __xfrm_state_locate(x, use_spi, family);
1104 if (use_spi && x->km.seq) {
1105 x1 = __xfrm_find_acq_byseq(x->km.seq);
1106 if (x1 && ((x1->id.proto != x->id.proto) ||
1107 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1114 x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
1116 &x->id.daddr, &x->props.saddr, 0);
1118 __xfrm_state_bump_genids(x);
1119 __xfrm_state_insert(x);
1123 spin_unlock_bh(&xfrm_state_lock);
1126 xfrm_state_delete(x1);
1131 xfrm_state_put(to_put);
1135 EXPORT_SYMBOL(xfrm_state_add);
1137 #ifdef CONFIG_XFRM_MIGRATE
1138 static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1141 struct xfrm_state *x = xfrm_state_alloc();
1145 memcpy(&x->id, &orig->id, sizeof(x->id));
1146 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1147 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1148 x->props.mode = orig->props.mode;
1149 x->props.replay_window = orig->props.replay_window;
1150 x->props.reqid = orig->props.reqid;
1151 x->props.family = orig->props.family;
1152 x->props.saddr = orig->props.saddr;
1155 x->aalg = xfrm_algo_clone(orig->aalg);
1159 x->props.aalgo = orig->props.aalgo;
1162 x->ealg = xfrm_algo_clone(orig->ealg);
1166 x->props.ealgo = orig->props.ealgo;
1169 x->calg = xfrm_algo_clone(orig->calg);
1173 x->props.calgo = orig->props.calgo;
1176 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1182 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1188 err = xfrm_init_state(x);
1192 x->props.flags = orig->props.flags;
1194 x->curlft.add_time = orig->curlft.add_time;
1195 x->km.state = orig->km.state;
1196 x->km.seq = orig->km.seq;
1214 /* xfrm_state_lock is held */
1215 struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1218 struct xfrm_state *x;
1219 struct hlist_node *entry;
1222 h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
1223 m->reqid, m->old_family);
1224 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1225 if (x->props.mode != m->mode ||
1226 x->id.proto != m->proto)
1228 if (m->reqid && x->props.reqid != m->reqid)
1230 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1232 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1239 h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
1241 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
1242 if (x->props.mode != m->mode ||
1243 x->id.proto != m->proto)
1245 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1247 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1257 EXPORT_SYMBOL(xfrm_migrate_state_find);
1259 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1260 struct xfrm_migrate *m)
1262 struct xfrm_state *xc;
1265 xc = xfrm_state_clone(x, &err);
1269 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1270 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1273 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1274 /* a care is needed when the destination address of the
1275 state is to be updated as it is a part of triplet */
1276 xfrm_state_insert(xc);
1278 if ((err = xfrm_state_add(xc)) < 0)
1287 EXPORT_SYMBOL(xfrm_state_migrate);
1290 int xfrm_state_update(struct xfrm_state *x)
1292 struct xfrm_state *x1, *to_put;
1294 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1298 spin_lock_bh(&xfrm_state_lock);
1299 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1305 if (xfrm_state_kern(x1)) {
1311 if (x1->km.state == XFRM_STATE_ACQ) {
1312 __xfrm_state_insert(x);
1318 spin_unlock_bh(&xfrm_state_lock);
1321 xfrm_state_put(to_put);
1327 xfrm_state_delete(x1);
1333 spin_lock_bh(&x1->lock);
1334 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1335 if (x->encap && x1->encap)
1336 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1337 if (x->coaddr && x1->coaddr) {
1338 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1340 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1341 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1342 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1345 mod_timer(&x1->timer, jiffies + HZ);
1346 if (x1->curlft.use_time)
1347 xfrm_state_check_expire(x1);
1351 spin_unlock_bh(&x1->lock);
1357 EXPORT_SYMBOL(xfrm_state_update);
1359 int xfrm_state_check_expire(struct xfrm_state *x)
1361 if (!x->curlft.use_time)
1362 x->curlft.use_time = get_seconds();
1364 if (x->km.state != XFRM_STATE_VALID)
1367 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1368 x->curlft.packets >= x->lft.hard_packet_limit) {
1369 x->km.state = XFRM_STATE_EXPIRED;
1370 mod_timer(&x->timer, jiffies);
1375 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1376 x->curlft.packets >= x->lft.soft_packet_limit)) {
1378 km_state_expired(x, 0, 0);
1382 EXPORT_SYMBOL(xfrm_state_check_expire);
1385 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1386 unsigned short family)
1388 struct xfrm_state *x;
1390 spin_lock_bh(&xfrm_state_lock);
1391 x = __xfrm_state_lookup(daddr, spi, proto, family);
1392 spin_unlock_bh(&xfrm_state_lock);
1395 EXPORT_SYMBOL(xfrm_state_lookup);
1398 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1399 u8 proto, unsigned short family)
1401 struct xfrm_state *x;
1403 spin_lock_bh(&xfrm_state_lock);
1404 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1405 spin_unlock_bh(&xfrm_state_lock);
1408 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1411 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1412 xfrm_address_t *daddr, xfrm_address_t *saddr,
1413 int create, unsigned short family)
1415 struct xfrm_state *x;
1417 spin_lock_bh(&xfrm_state_lock);
1418 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1419 spin_unlock_bh(&xfrm_state_lock);
1423 EXPORT_SYMBOL(xfrm_find_acq);
#ifdef CONFIG_XFRM_SUB_POLICY
/* Sort templates via the per-family hook (no-op if none). */
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->tmpl_sort)
		err = afinfo->tmpl_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);

/* Sort states via the per-family hook (no-op if none). */
int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif /* CONFIG_XFRM_SUB_POLICY */
1463 /* Silly enough, but I'm lazy to build resolution list */
1465 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1469 for (i = 0; i <= xfrm_state_hmask; i++) {
1470 struct hlist_node *entry;
1471 struct xfrm_state *x;
1473 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1474 if (x->km.seq == seq &&
1475 x->km.state == XFRM_STATE_ACQ) {
1484 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1486 struct xfrm_state *x;
1488 spin_lock_bh(&xfrm_state_lock);
1489 x = __xfrm_find_acq_byseq(seq);
1490 spin_unlock_bh(&xfrm_state_lock);
1493 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1495 u32 xfrm_get_acqseq(void)
1499 static DEFINE_SPINLOCK(acqseq_lock);
1501 spin_lock_bh(&acqseq_lock);
1502 res = (++acqseq ? : ++acqseq);
1503 spin_unlock_bh(&acqseq_lock);
1506 EXPORT_SYMBOL(xfrm_get_acqseq);
1508 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1511 struct xfrm_state *x0;
1513 __be32 minspi = htonl(low);
1514 __be32 maxspi = htonl(high);
1516 spin_lock_bh(&x->lock);
1517 if (x->km.state == XFRM_STATE_DEAD)
1526 if (minspi == maxspi) {
1527 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
1535 for (h=0; h<high-low+1; h++) {
1536 spi = low + net_random()%(high-low+1);
1537 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1539 x->id.spi = htonl(spi);
1546 spin_lock_bh(&xfrm_state_lock);
1547 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1548 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
1549 spin_unlock_bh(&xfrm_state_lock);
1555 spin_unlock_bh(&x->lock);
1559 EXPORT_SYMBOL(xfrm_alloc_spi);
1561 int xfrm_state_walk(struct xfrm_state_walk *walk,
1562 int (*func)(struct xfrm_state *, int, void*),
1565 struct xfrm_state *old, *x, *last = NULL;
1568 if (walk->state == NULL && walk->count != 0)
1571 old = x = walk->state;
1573 spin_lock_bh(&xfrm_state_lock);
1575 x = list_first_entry(&xfrm_state_all, struct xfrm_state, all);
1576 list_for_each_entry_from(x, &xfrm_state_all, all) {
1577 if (x->km.state == XFRM_STATE_DEAD)
1579 if (!xfrm_id_proto_match(x->id.proto, walk->proto))
1582 err = func(last, walk->count, data);
1584 xfrm_state_hold(last);
1586 xfrm_state_walk_ongoing++;
1593 if (walk->count == 0) {
1598 err = func(last, 0, data);
1600 spin_unlock_bh(&xfrm_state_lock);
1602 xfrm_state_put(old);
1603 xfrm_state_walk_completed++;
1604 if (!list_empty(&xfrm_state_gc_leftovers))
1605 schedule_work(&xfrm_state_gc_work);
1609 EXPORT_SYMBOL(xfrm_state_walk);
1611 void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1613 if (walk->state != NULL) {
1614 xfrm_state_put(walk->state);
1616 xfrm_state_walk_completed++;
1617 if (!list_empty(&xfrm_state_gc_leftovers))
1618 schedule_work(&xfrm_state_gc_work);
1621 EXPORT_SYMBOL(xfrm_state_walk_done);
1624 void xfrm_replay_notify(struct xfrm_state *x, int event)
1627 /* we send notify messages in case
1628 * 1. we updated on of the sequence numbers, and the seqno difference
1629 * is at least x->replay_maxdiff, in this case we also update the
1630 * timeout of our timer function
1631 * 2. if x->replay_maxage has elapsed since last update,
1632 * and there were changes
1634 * The state structure must be locked!
1638 case XFRM_REPLAY_UPDATE:
1639 if (x->replay_maxdiff &&
1640 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
1641 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
1642 if (x->xflags & XFRM_TIME_DEFER)
1643 event = XFRM_REPLAY_TIMEOUT;
1650 case XFRM_REPLAY_TIMEOUT:
1651 if ((x->replay.seq == x->preplay.seq) &&
1652 (x->replay.bitmap == x->preplay.bitmap) &&
1653 (x->replay.oseq == x->preplay.oseq)) {
1654 x->xflags |= XFRM_TIME_DEFER;
1661 memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
1662 c.event = XFRM_MSG_NEWAE;
1663 c.data.aevent = event;
1664 km_state_notify(x, &c);
1666 if (x->replay_maxage &&
1667 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
1668 x->xflags &= ~XFRM_TIME_DEFER;
1671 static void xfrm_replay_timer_handler(unsigned long data)
1673 struct xfrm_state *x = (struct xfrm_state*)data;
1675 spin_lock(&x->lock);
1677 if (x->km.state == XFRM_STATE_VALID) {
1678 if (xfrm_aevent_is_on())
1679 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1681 x->xflags |= XFRM_TIME_DEFER;
1684 spin_unlock(&x->lock);
1687 int xfrm_replay_check(struct xfrm_state *x,
1688 struct sk_buff *skb, __be32 net_seq)
1691 u32 seq = ntohl(net_seq);
1693 if (unlikely(seq == 0))
1696 if (likely(seq > x->replay.seq))
1699 diff = x->replay.seq - seq;
1700 if (diff >= min_t(unsigned int, x->props.replay_window,
1701 sizeof(x->replay.bitmap) * 8)) {
1702 x->stats.replay_window++;
1706 if (x->replay.bitmap & (1U << diff)) {
1713 xfrm_audit_state_replay(x, skb, net_seq);
1717 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1720 u32 seq = ntohl(net_seq);
1722 if (seq > x->replay.seq) {
1723 diff = seq - x->replay.seq;
1724 if (diff < x->props.replay_window)
1725 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1727 x->replay.bitmap = 1;
1728 x->replay.seq = seq;
1730 diff = x->replay.seq - seq;
1731 x->replay.bitmap |= (1U << diff);
1734 if (xfrm_aevent_is_on())
1735 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1738 static LIST_HEAD(xfrm_km_list);
1739 static DEFINE_RWLOCK(xfrm_km_lock);
1741 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1743 struct xfrm_mgr *km;
1745 read_lock(&xfrm_km_lock);
1746 list_for_each_entry(km, &xfrm_km_list, list)
1747 if (km->notify_policy)
1748 km->notify_policy(xp, dir, c);
1749 read_unlock(&xfrm_km_lock);
1752 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1754 struct xfrm_mgr *km;
1755 read_lock(&xfrm_km_lock);
1756 list_for_each_entry(km, &xfrm_km_list, list)
1759 read_unlock(&xfrm_km_lock);
1762 EXPORT_SYMBOL(km_policy_notify);
1763 EXPORT_SYMBOL(km_state_notify);
1765 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1771 c.event = XFRM_MSG_EXPIRE;
1772 km_state_notify(x, &c);
1778 EXPORT_SYMBOL(km_state_expired);
1780 * We send to all registered managers regardless of failure
1781 * We are happy with one success
1783 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1785 int err = -EINVAL, acqret;
1786 struct xfrm_mgr *km;
1788 read_lock(&xfrm_km_lock);
1789 list_for_each_entry(km, &xfrm_km_list, list) {
1790 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1794 read_unlock(&xfrm_km_lock);
1797 EXPORT_SYMBOL(km_query);
1799 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1802 struct xfrm_mgr *km;
1804 read_lock(&xfrm_km_lock);
1805 list_for_each_entry(km, &xfrm_km_list, list) {
1806 if (km->new_mapping)
1807 err = km->new_mapping(x, ipaddr, sport);
1811 read_unlock(&xfrm_km_lock);
1814 EXPORT_SYMBOL(km_new_mapping);
1816 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1822 c.event = XFRM_MSG_POLEXPIRE;
1823 km_policy_notify(pol, dir, &c);
1828 EXPORT_SYMBOL(km_policy_expired);
#ifdef CONFIG_XFRM_MIGRATE
/* Broadcast an SA migration request to all key managers implementing the
 * migrate hook; like km_query, one success is enough. Returns -EINVAL if
 * no manager accepted the migration.
 */
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif
1852 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1856 struct xfrm_mgr *km;
1858 read_lock(&xfrm_km_lock);
1859 list_for_each_entry(km, &xfrm_km_list, list) {
1861 ret = km->report(proto, sel, addr);
1866 read_unlock(&xfrm_km_lock);
1869 EXPORT_SYMBOL(km_report);
1871 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1875 struct xfrm_mgr *km;
1876 struct xfrm_policy *pol = NULL;
1878 if (optlen <= 0 || optlen > PAGE_SIZE)
1881 data = kmalloc(optlen, GFP_KERNEL);
1886 if (copy_from_user(data, optval, optlen))
1890 read_lock(&xfrm_km_lock);
1891 list_for_each_entry(km, &xfrm_km_list, list) {
1892 pol = km->compile_policy(sk, optname, data,
1897 read_unlock(&xfrm_km_lock);
1900 xfrm_sk_policy_insert(sk, err, pol);
1909 EXPORT_SYMBOL(xfrm_user_policy);
1911 int xfrm_register_km(struct xfrm_mgr *km)
1913 write_lock_bh(&xfrm_km_lock);
1914 list_add_tail(&km->list, &xfrm_km_list);
1915 write_unlock_bh(&xfrm_km_lock);
1918 EXPORT_SYMBOL(xfrm_register_km);
1920 int xfrm_unregister_km(struct xfrm_mgr *km)
1922 write_lock_bh(&xfrm_km_lock);
1923 list_del(&km->list);
1924 write_unlock_bh(&xfrm_km_lock);
1927 EXPORT_SYMBOL(xfrm_unregister_km);
1929 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1932 if (unlikely(afinfo == NULL))
1934 if (unlikely(afinfo->family >= NPROTO))
1935 return -EAFNOSUPPORT;
1936 write_lock_bh(&xfrm_state_afinfo_lock);
1937 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1940 xfrm_state_afinfo[afinfo->family] = afinfo;
1941 write_unlock_bh(&xfrm_state_afinfo_lock);
1944 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1946 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1949 if (unlikely(afinfo == NULL))
1951 if (unlikely(afinfo->family >= NPROTO))
1952 return -EAFNOSUPPORT;
1953 write_lock_bh(&xfrm_state_afinfo_lock);
1954 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1955 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1958 xfrm_state_afinfo[afinfo->family] = NULL;
1960 write_unlock_bh(&xfrm_state_afinfo_lock);
1963 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1965 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1967 struct xfrm_state_afinfo *afinfo;
1968 if (unlikely(family >= NPROTO))
1970 read_lock(&xfrm_state_afinfo_lock);
1971 afinfo = xfrm_state_afinfo[family];
1972 if (unlikely(!afinfo))
1973 read_unlock(&xfrm_state_afinfo_lock);
1977 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1978 __releases(xfrm_state_afinfo_lock)
1980 read_unlock(&xfrm_state_afinfo_lock);
1983 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1984 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1987 struct xfrm_state *t = x->tunnel;
1989 if (atomic_read(&t->tunnel_users) == 2)
1990 xfrm_state_delete(t);
1991 atomic_dec(&t->tunnel_users);
1996 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1998 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
2002 spin_lock_bh(&x->lock);
2003 if (x->km.state == XFRM_STATE_VALID &&
2004 x->type && x->type->get_mtu)
2005 res = x->type->get_mtu(x, mtu);
2007 res = mtu - x->props.header_len;
2008 spin_unlock_bh(&x->lock);
2012 int xfrm_init_state(struct xfrm_state *x)
2014 struct xfrm_state_afinfo *afinfo;
2015 struct xfrm_mode *inner_mode;
2016 int family = x->props.family;
2019 err = -EAFNOSUPPORT;
2020 afinfo = xfrm_state_get_afinfo(family);
2025 if (afinfo->init_flags)
2026 err = afinfo->init_flags(x);
2028 xfrm_state_put_afinfo(afinfo);
2033 err = -EPROTONOSUPPORT;
2035 if (x->sel.family != AF_UNSPEC) {
2036 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2037 if (inner_mode == NULL)
2040 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2041 family != x->sel.family) {
2042 xfrm_put_mode(inner_mode);
2046 x->inner_mode = inner_mode;
2048 struct xfrm_mode *inner_mode_iaf;
2050 inner_mode = xfrm_get_mode(x->props.mode, AF_INET);
2051 if (inner_mode == NULL)
2054 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
2055 xfrm_put_mode(inner_mode);
2059 inner_mode_iaf = xfrm_get_mode(x->props.mode, AF_INET6);
2060 if (inner_mode_iaf == NULL)
2063 if (!(inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)) {
2064 xfrm_put_mode(inner_mode_iaf);
2068 if (x->props.family == AF_INET) {
2069 x->inner_mode = inner_mode;
2070 x->inner_mode_iaf = inner_mode_iaf;
2072 x->inner_mode = inner_mode_iaf;
2073 x->inner_mode_iaf = inner_mode;
2077 x->type = xfrm_get_type(x->id.proto, family);
2078 if (x->type == NULL)
2081 err = x->type->init_state(x);
2085 x->outer_mode = xfrm_get_mode(x->props.mode, family);
2086 if (x->outer_mode == NULL)
2089 x->km.state = XFRM_STATE_VALID;
2095 EXPORT_SYMBOL(xfrm_init_state);
2097 void __init xfrm_state_init(void)
2101 sz = sizeof(struct hlist_head) * 8;
2103 xfrm_state_bydst = xfrm_hash_alloc(sz);
2104 xfrm_state_bysrc = xfrm_hash_alloc(sz);
2105 xfrm_state_byspi = xfrm_hash_alloc(sz);
2106 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
2107 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
2108 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2110 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
2113 #ifdef CONFIG_AUDITSYSCALL
2114 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2115 struct audit_buffer *audit_buf)
2117 struct xfrm_sec_ctx *ctx = x->security;
2118 u32 spi = ntohl(x->id.spi);
2121 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2122 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2124 switch(x->props.family) {
2126 audit_log_format(audit_buf,
2127 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2128 NIPQUAD(x->props.saddr.a4),
2129 NIPQUAD(x->id.daddr.a4));
2132 audit_log_format(audit_buf,
2133 " src=" NIP6_FMT " dst=" NIP6_FMT,
2134 NIP6(*(struct in6_addr *)x->props.saddr.a6),
2135 NIP6(*(struct in6_addr *)x->id.daddr.a6));
2139 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2142 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2143 struct audit_buffer *audit_buf)
2146 struct ipv6hdr *iph6;
2151 audit_log_format(audit_buf,
2152 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2153 NIPQUAD(iph4->saddr),
2154 NIPQUAD(iph4->daddr));
2157 iph6 = ipv6_hdr(skb);
2158 audit_log_format(audit_buf,
2159 " src=" NIP6_FMT " dst=" NIP6_FMT
2160 " flowlbl=0x%x%02x%02x",
2163 iph6->flow_lbl[0] & 0x0f,
2170 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2171 uid_t auid, u32 sessionid, u32 secid)
2173 struct audit_buffer *audit_buf;
2175 audit_buf = xfrm_audit_start("SAD-add");
2176 if (audit_buf == NULL)
2178 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2179 xfrm_audit_helper_sainfo(x, audit_buf);
2180 audit_log_format(audit_buf, " res=%u", result);
2181 audit_log_end(audit_buf);
2183 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2185 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2186 uid_t auid, u32 sessionid, u32 secid)
2188 struct audit_buffer *audit_buf;
2190 audit_buf = xfrm_audit_start("SAD-delete");
2191 if (audit_buf == NULL)
2193 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2194 xfrm_audit_helper_sainfo(x, audit_buf);
2195 audit_log_format(audit_buf, " res=%u", result);
2196 audit_log_end(audit_buf);
2198 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2200 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2201 struct sk_buff *skb)
2203 struct audit_buffer *audit_buf;
2206 audit_buf = xfrm_audit_start("SA-replay-overflow");
2207 if (audit_buf == NULL)
2209 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2210 /* don't record the sequence number because it's inherent in this kind
2211 * of audit message */
2212 spi = ntohl(x->id.spi);
2213 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2214 audit_log_end(audit_buf);
2216 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2218 static void xfrm_audit_state_replay(struct xfrm_state *x,
2219 struct sk_buff *skb, __be32 net_seq)
2221 struct audit_buffer *audit_buf;
2224 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2225 if (audit_buf == NULL)
2227 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2228 spi = ntohl(x->id.spi);
2229 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2230 spi, spi, ntohl(net_seq));
2231 audit_log_end(audit_buf);
2234 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2236 struct audit_buffer *audit_buf;
2238 audit_buf = xfrm_audit_start("SA-notfound");
2239 if (audit_buf == NULL)
2241 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2242 audit_log_end(audit_buf);
2244 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2246 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2247 __be32 net_spi, __be32 net_seq)
2249 struct audit_buffer *audit_buf;
2252 audit_buf = xfrm_audit_start("SA-notfound");
2253 if (audit_buf == NULL)
2255 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2256 spi = ntohl(net_spi);
2257 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2258 spi, spi, ntohl(net_seq));
2259 audit_log_end(audit_buf);
2261 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2263 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2264 struct sk_buff *skb, u8 proto)
2266 struct audit_buffer *audit_buf;
2270 audit_buf = xfrm_audit_start("SA-icv-failure");
2271 if (audit_buf == NULL)
2273 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2274 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2275 u32 spi = ntohl(net_spi);
2276 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2277 spi, spi, ntohl(net_seq));
2279 audit_log_end(audit_buf);
2281 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2282 #endif /* CONFIG_AUDITSYSCALL */