4 #include <linux/compiler.h>
6 #include <linux/xfrm.h>
7 #include <linux/spinlock.h>
8 #include <linux/list.h>
9 #include <linux/skbuff.h>
10 #include <linux/socket.h>
11 #include <linux/pfkeyv2.h>
12 #include <linux/ipsec.h>
13 #include <linux/in6.h>
14 #include <linux/mutex.h>
18 #include <net/route.h>
20 #include <net/ip6_fib.h>
22 #define XFRM_ALIGN8(len) (((len) + 7) & ~7)
23 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
24 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
26 extern struct sock *xfrm_nl;
27 extern u32 sysctl_xfrm_aevent_etime;
28 extern u32 sysctl_xfrm_aevent_rseqth;
30 extern struct mutex xfrm_cfg_mutex;
32 /* Organization of SPD aka "XFRM rules"
33 ------------------------------------
36 - policy rule, struct xfrm_policy (=SPD entry)
37 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
38 - instance of a transformer, struct xfrm_state (=SA)
39 - template to clone xfrm_state, struct xfrm_tmpl
41 SPD is plain linear list of xfrm_policy rules, ordered by priority.
42 (To be compatible with existing pfkeyv2 implementations,
43 many rules with priority of 0x7fffffff are allowed to exist and
44 such rules are ordered in an unpredictable way, thanks to bsd folks.)
46 Lookup is plain linear search until the first match with selector.
48 If "action" is "block", then we prohibit the flow, otherwise:
49 if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
50 policy entry has list of up to XFRM_MAX_DEPTH transformations,
51 described by templates xfrm_tmpl. Each template is resolved
52 to a complete xfrm_state (see below) and we pack bundle of transformations
53 to a dst_entry returned to requestor.
55 dst -. xfrm .-> xfrm_state #1
56 |---. child .-> dst -. xfrm .-> xfrm_state #2
57 |---. child .-> dst -. xfrm .-> xfrm_state #3
60    Bundles are cached at xfrm_policy struct (field ->bundles).
63    Resolution of xfrm_tmpl
64 -----------------------
66 1. ->mode Mode: transport or tunnel
67 2. ->id.proto Protocol: AH/ESP/IPCOMP
68 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
69 Q: allow to resolve security gateway?
70 4. ->id.spi If not zero, static SPI.
71 5. ->saddr Local tunnel endpoint, ignored for transport mode.
72 6. ->algos List of allowed algos. Plain bitmask now.
73 Q: ealgos, aalgos, calgos. What a mess...
74 7. ->share Sharing mode.
75 Q: how to implement private sharing mode? To add struct sock* to
78 Having this template we search through SAD searching for entries
79 with appropriate mode/proto/algo, permitted by selector.
80 If no appropriate entry found, it is requested from key manager.
83 Q: How to find all the bundles referring to a physical path for
84 PMTU discovery? Seems, dst should contain list of all parents...
85 and enter to infinite locking hierarchy disaster.
86 No! It is easier, we will not search for them, let them find us.
87 We add genid to each dst plus pointer to genid of raw IP route,
88 pmtu disc will update pmtu on raw IP route and increase its genid.
89 dst_check() will see this for top level and trigger resyncing
90 metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
93 /* Full description of state of transformer. */
96 /* Note: bydst is re-used during gc */
97 struct list_head bydst;
98 struct list_head bysrc;
99 struct list_head byspi;
105 struct xfrm_selector sel;
107 	/* Key manager bits */
114 /* Parameters of this state. */
119 u8 aalgo, ealgo, calgo;
122 xfrm_address_t saddr;
127 struct xfrm_lifetime_cfg lft;
129 /* Data for transformer */
130 struct xfrm_algo *aalg;
131 struct xfrm_algo *ealg;
132 struct xfrm_algo *calg;
134 /* Data for encapsulator */
135 struct xfrm_encap_tmpl *encap;
137 /* Data for care-of address */
138 xfrm_address_t *coaddr;
140 /* IPComp needs an IPIP tunnel for handling uncompressed packets */
141 struct xfrm_state *tunnel;
143 /* If a tunnel, number of users + 1 */
144 atomic_t tunnel_users;
146 /* State for replay detection */
147 struct xfrm_replay_state replay;
149 /* Replay detection state at the time we sent the last notification */
150 struct xfrm_replay_state preplay;
152 /* internal flag that only holds state for delayed aevent at the
157 /* Replay detection notification settings */
161 /* Replay detection notification timer */
162 struct timer_list rtimer;
165 struct xfrm_stats stats;
167 struct xfrm_lifetime_cur curlft;
168 struct timer_list timer;
173 /* Reference to data common to all the instances of this
175 struct xfrm_type *type;
176 struct xfrm_mode *mode;
178 /* Security context */
179 struct xfrm_sec_ctx *security;
181 /* Private data of this transformer, format is opaque,
182 * interpreted by xfrm_type methods. */
186 /* xflags - make enum if more show up */
187 #define XFRM_TIME_DEFER 1
198 /* callback structure passed from either netlink or pfkey */
216 struct xfrm_policy_afinfo {
217 unsigned short family;
218 struct xfrm_type *type_map[IPPROTO_MAX];
219 struct xfrm_mode *mode_map[XFRM_MODE_MAX];
220 struct dst_ops *dst_ops;
221 void (*garbage_collect)(void);
222 int (*dst_lookup)(struct xfrm_dst **dst, struct flowi *fl);
223 struct dst_entry *(*find_bundle)(struct flowi *fl, struct xfrm_policy *policy);
224 int (*bundle_create)(struct xfrm_policy *policy,
225 struct xfrm_state **xfrm,
228 struct dst_entry **dst_p);
229 void (*decode_session)(struct sk_buff *skb,
233 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
234 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
235 extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
236 extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
237 #define XFRM_ACQ_EXPIRES 30
240 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
241 extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
242 extern int __xfrm_state_delete(struct xfrm_state *x);
244 struct xfrm_state_afinfo {
245 unsigned short family;
246 struct list_head *state_bysrc;
247 struct list_head *state_byspi;
248 int (*init_flags)(struct xfrm_state *x);
249 void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
250 struct xfrm_tmpl *tmpl,
251 xfrm_address_t *daddr, xfrm_address_t *saddr);
252 struct xfrm_state *(*state_lookup)(xfrm_address_t *daddr, u32 spi, u8 proto);
253 struct xfrm_state *(*state_lookup_byaddr)(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto);
254 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
255 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
258 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
259 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
261 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
266 struct module *owner;
269 #define XFRM_TYPE_NON_FRAGMENT 1
271 int (*init_state)(struct xfrm_state *x);
272 void (*destructor)(struct xfrm_state *);
273 int (*input)(struct xfrm_state *, struct sk_buff *skb);
274 int (*output)(struct xfrm_state *, struct sk_buff *pskb);
275 int (*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *);
276 int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
277 xfrm_address_t *(*local_addr)(struct xfrm_state *, xfrm_address_t *);
278 xfrm_address_t *(*remote_addr)(struct xfrm_state *, xfrm_address_t *);
279 /* Estimate maximal size of result of transformation of a dgram */
280 u32 (*get_max_size)(struct xfrm_state *, int size);
283 extern int xfrm_register_type(struct xfrm_type *type, unsigned short family);
284 extern int xfrm_unregister_type(struct xfrm_type *type, unsigned short family);
285 extern struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family);
286 extern void xfrm_put_type(struct xfrm_type *type);
289 int (*input)(struct xfrm_state *x, struct sk_buff *skb);
290 int (*output)(struct sk_buff *skb);
292 struct module *owner;
296 extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
297 extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
298 extern struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family);
299 extern void xfrm_put_mode(struct xfrm_mode *mode);
303 /* id in template is interpreted as:
304 * daddr - destination of tunnel, may be zero for transport mode.
305 * spi - zero to acquire spi. Not zero if spi is static, then
306 * daddr must be fixed too.
307 * proto - AH/ESP/IPCOMP
311 /* Source address of tunnel. Ignored, if it is not a tunnel. */
312 xfrm_address_t saddr;
316 /* Mode: transport, tunnel etc. */
319 /* Sharing mode: unique, this session only, this user only etc. */
322 	/* May skip this transformation if no SA is found */
325 /* Bit mask of algos allowed for acquisition */
331 #define XFRM_MAX_DEPTH 6
335 struct xfrm_policy *next;
336 struct list_head list;
338 /* This lock only affects elements except for entry. */
341 struct timer_list timer;
346 struct xfrm_selector selector;
347 struct xfrm_lifetime_cfg lft;
348 struct xfrm_lifetime_cur curlft;
349 struct dst_entry *bundles;
355 struct xfrm_sec_ctx *security;
356 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
359 #define XFRM_KM_TIMEOUT 30
361 #define XFRM_REPLAY_SEQ 1
362 #define XFRM_REPLAY_OSEQ 2
363 #define XFRM_REPLAY_SEQ_MASK 3
365 #define XFRM_REPLAY_UPDATE XFRM_AE_CR
366 #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
368 /* default aevent timeout in units of 100ms */
369 #define XFRM_AE_ETIME 10
370 /* Async Event timer multiplier */
371 #define XFRM_AE_ETH_M 10
372 /* default seq threshold size */
373 #define XFRM_AE_SEQT_SIZE 2
377 struct list_head list;
379 int (*notify)(struct xfrm_state *x, struct km_event *c);
380 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
381 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
382 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
383 int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
384 int (*report)(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
387 extern int xfrm_register_km(struct xfrm_mgr *km);
388 extern int xfrm_unregister_km(struct xfrm_mgr *km);
391 extern struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
392 #ifdef CONFIG_XFRM_SUB_POLICY
393 extern struct xfrm_policy *xfrm_policy_list_sub[XFRM_POLICY_MAX*2];
395 static inline int xfrm_policy_lists_empty(int dir)
397 return (!xfrm_policy_list[dir] && !xfrm_policy_list_sub[dir]);
400 static inline int xfrm_policy_lists_empty(int dir)
402 return (!xfrm_policy_list[dir]);
406 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
408 if (likely(policy != NULL))
409 atomic_inc(&policy->refcnt);
412 extern void __xfrm_policy_destroy(struct xfrm_policy *policy);
414 static inline void xfrm_pol_put(struct xfrm_policy *policy)
416 if (atomic_dec_and_test(&policy->refcnt))
417 __xfrm_policy_destroy(policy);
#ifdef CONFIG_XFRM_SUB_POLICY
/* Drop references on an array of @npols policies, last-to-first. */
static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
{
	int i;
	for (i = npols - 1; i >= 0; --i)
		xfrm_pol_put(pols[i]);
}
#else
/* Without sub-policies at most one policy is ever passed. */
static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
{
	xfrm_pol_put(pols[0]);
}
#endif
434 #define XFRM_DST_HSIZE 1024
437 unsigned __xfrm4_dst_hash(xfrm_address_t *addr)
441 h = (h ^ (h>>16)) % XFRM_DST_HSIZE;
446 unsigned __xfrm6_dst_hash(xfrm_address_t *addr)
449 h = ntohl(addr->a6[2]^addr->a6[3]);
450 h = (h ^ (h>>16)) % XFRM_DST_HSIZE;
455 unsigned __xfrm4_src_hash(xfrm_address_t *addr)
457 return __xfrm4_dst_hash(addr);
461 unsigned __xfrm6_src_hash(xfrm_address_t *addr)
463 return __xfrm6_dst_hash(addr);
467 unsigned xfrm_src_hash(xfrm_address_t *addr, unsigned short family)
471 return __xfrm4_src_hash(addr);
473 return __xfrm6_src_hash(addr);
479 unsigned __xfrm4_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto)
482 h = ntohl(addr->a4^spi^proto);
483 h = (h ^ (h>>10) ^ (h>>20)) % XFRM_DST_HSIZE;
488 unsigned __xfrm6_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto)
491 h = ntohl(addr->a6[2]^addr->a6[3]^spi^proto);
492 h = (h ^ (h>>10) ^ (h>>20)) % XFRM_DST_HSIZE;
497 unsigned xfrm_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto, unsigned short family)
501 return __xfrm4_spi_hash(addr, spi, proto);
503 return __xfrm6_spi_hash(addr, spi, proto);
508 extern void __xfrm_state_destroy(struct xfrm_state *);
510 static inline void __xfrm_state_put(struct xfrm_state *x)
512 atomic_dec(&x->refcnt);
515 static inline void xfrm_state_put(struct xfrm_state *x)
517 if (atomic_dec_and_test(&x->refcnt))
518 __xfrm_state_destroy(x);
521 static inline void xfrm_state_hold(struct xfrm_state *x)
523 atomic_inc(&x->refcnt);
526 static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
533 pdw = prefixlen >> 5; /* num of whole __u32 in prefix */
534 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
537 if (memcmp(a1, a2, pdw << 2))
543 mask = htonl((0xffffffff) << (32 - pbi));
545 if ((a1[pdw] ^ a2[pdw]) & mask)
553 u16 xfrm_flowi_sport(struct flowi *fl)
560 port = fl->fl_ip_sport;
564 port = htons(fl->fl_icmp_type);
566 #ifdef CONFIG_IPV6_MIP6
568 port = htons(fl->fl_mh_type);
578 u16 xfrm_flowi_dport(struct flowi *fl)
585 port = fl->fl_ip_dport;
589 port = htons(fl->fl_icmp_code);
598 __xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
600 return addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
601 addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
602 !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
603 !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
604 (fl->proto == sel->proto || !sel->proto) &&
605 (fl->oif == sel->ifindex || !sel->ifindex);
609 __xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
611 return addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
612 addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
613 !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
614 !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
615 (fl->proto == sel->proto || !sel->proto) &&
616 (fl->oif == sel->ifindex || !sel->ifindex);
620 xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
621 unsigned short family)
625 return __xfrm4_selector_match(sel, fl);
627 return __xfrm6_selector_match(sel, fl);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* If neither has a context --> match
 * Otherwise, both must have a context and the sid, doi, alg must match
 */
static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
	return ((!s1 && !s2) ||
		(s1 && s2 &&
		 (s1->ctx_sid == s2->ctx_sid) &&
		 (s1->ctx_doi == s2->ctx_doi) &&
		 (s1->ctx_alg == s2->ctx_alg)));
}
#else
/* Without security networking every pair of contexts matches. */
static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
	return 1;
}
#endif
651 /* A struct encoding bundle of transformations to apply to some set of flow.
653 * dst->child points to the next element of bundle.
654  * dst->xfrm points to an instance of a transformer.
656 * Due to unfortunate limitations of current routing cache, which we
657 * have no time to fix, it mirrors struct rtable and bound to the same
658 * routing key, including saddr,daddr. However, we can have many of
659 * bundles differing by session id. All the bundles grow from a parent
665 struct xfrm_dst *next;
666 struct dst_entry dst;
670 struct dst_entry *route;
671 u32 route_mtu_cached;
672 u32 child_mtu_cached;
677 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
679 dst_release(xdst->route);
680 if (likely(xdst->u.dst.xfrm))
681 xfrm_state_put(xdst->u.dst.xfrm);
684 extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
690 struct xfrm_state *xvec[XFRM_MAX_DEPTH];
693 static inline struct sec_path *
694 secpath_get(struct sec_path *sp)
697 atomic_inc(&sp->refcnt);
701 extern void __secpath_destroy(struct sec_path *sp);
704 secpath_put(struct sec_path *sp)
706 if (sp && atomic_dec_and_test(&sp->refcnt))
707 __secpath_destroy(sp);
710 extern struct sec_path *secpath_dup(struct sec_path *src);
713 secpath_reset(struct sk_buff *skb)
716 secpath_put(skb->sp);
722 __xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
724 return (tmpl->saddr.a4 &&
725 tmpl->saddr.a4 != x->props.saddr.a4);
729 __xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
731 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
732 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
736 xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
740 return __xfrm4_state_addr_cmp(tmpl, x);
742 return __xfrm6_state_addr_cmp(tmpl, x);
749 extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
751 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
753 if (sk && sk->sk_policy[XFRM_POLICY_IN])
754 return __xfrm_policy_check(sk, dir, skb, family);
756 return (xfrm_policy_lists_empty(dir) && !skb->sp) ||
757 (skb->dst->flags & DST_NOPOLICY) ||
758 __xfrm_policy_check(sk, dir, skb, family);
761 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
763 return xfrm_policy_check(sk, dir, skb, AF_INET);
766 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
768 return xfrm_policy_check(sk, dir, skb, AF_INET6);
771 extern int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family);
772 extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
774 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
776 return xfrm_policy_lists_empty(XFRM_POLICY_OUT) ||
777 (skb->dst->flags & DST_NOXFRM) ||
778 __xfrm_route_forward(skb, family);
781 static inline int xfrm4_route_forward(struct sk_buff *skb)
783 return xfrm_route_forward(skb, AF_INET);
786 static inline int xfrm6_route_forward(struct sk_buff *skb)
788 return xfrm_route_forward(skb, AF_INET6);
791 extern int __xfrm_sk_clone_policy(struct sock *sk);
793 static inline int xfrm_sk_clone_policy(struct sock *sk)
795 if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
796 return __xfrm_sk_clone_policy(sk);
800 extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
802 static inline void xfrm_sk_free_policy(struct sock *sk)
804 if (unlikely(sk->sk_policy[0] != NULL)) {
805 xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
806 sk->sk_policy[0] = NULL;
808 if (unlikely(sk->sk_policy[1] != NULL)) {
809 xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
810 sk->sk_policy[1] = NULL;
/* CONFIG_XFRM=n stubs: every check passes, cloning/freeing are no-ops. */
static inline void xfrm_sk_free_policy(struct sock *sk) {}
static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return 1;
}
static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return 1;
}
static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
{
	return 1;
}
835 xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
839 return (xfrm_address_t *)&fl->fl4_dst;
841 return (xfrm_address_t *)&fl->fl6_dst;
847 xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
851 return (xfrm_address_t *)&fl->fl4_src;
853 return (xfrm_address_t *)&fl->fl6_src;
858 static __inline__ int
859 __xfrm4_state_addr_check(struct xfrm_state *x,
860 xfrm_address_t *daddr, xfrm_address_t *saddr)
862 if (daddr->a4 == x->id.daddr.a4 &&
863 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
868 static __inline__ int
869 __xfrm6_state_addr_check(struct xfrm_state *x,
870 xfrm_address_t *daddr, xfrm_address_t *saddr)
872 if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
873 (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
874 ipv6_addr_any((struct in6_addr *)saddr) ||
875 ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
880 static __inline__ int
881 xfrm_state_addr_check(struct xfrm_state *x,
882 xfrm_address_t *daddr, xfrm_address_t *saddr,
883 unsigned short family)
887 return __xfrm4_state_addr_check(x, daddr, saddr);
889 return __xfrm6_state_addr_check(x, daddr, saddr);
894 static __inline__ int
895 xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl,
896 unsigned short family)
900 return __xfrm4_state_addr_check(x,
901 (xfrm_address_t *)&fl->fl4_dst,
902 (xfrm_address_t *)&fl->fl4_src);
904 return __xfrm6_state_addr_check(x,
905 (xfrm_address_t *)&fl->fl6_dst,
906 (xfrm_address_t *)&fl->fl6_src);
911 static inline int xfrm_state_kern(struct xfrm_state *x)
913 return atomic_read(&x->tunnel_users);
916 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
918 return (!userproto || proto == userproto ||
919 (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
920 proto == IPPROTO_ESP ||
921 proto == IPPROTO_COMP)));
925 * xfrm algorithm information
927 struct xfrm_algo_auth_info {
932 struct xfrm_algo_encr_info {
937 struct xfrm_algo_comp_info {
941 struct xfrm_algo_desc {
946 struct xfrm_algo_auth_info auth;
947 struct xfrm_algo_encr_info encr;
948 struct xfrm_algo_comp_info comp;
950 struct sadb_alg desc;
953 /* XFRM tunnel handlers. */
955 int (*handler)(struct sk_buff *skb);
956 int (*err_handler)(struct sk_buff *skb, __u32 info);
958 struct xfrm_tunnel *next;
962 struct xfrm6_tunnel {
963 int (*handler)(struct sk_buff *skb);
964 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
965 int type, int code, int offset, __u32 info);
967 struct xfrm6_tunnel *next;
971 extern void xfrm_init(void);
972 extern void xfrm4_init(void);
973 extern void xfrm6_init(void);
974 extern void xfrm6_fini(void);
975 extern void xfrm_state_init(void);
976 extern void xfrm4_state_init(void);
977 extern void xfrm6_state_init(void);
978 extern void xfrm6_state_fini(void);
980 extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *);
981 extern struct xfrm_state *xfrm_state_alloc(void);
982 extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
983 struct flowi *fl, struct xfrm_tmpl *tmpl,
984 struct xfrm_policy *pol, int *err,
985 unsigned short family);
986 extern int xfrm_state_check_expire(struct xfrm_state *x);
987 extern void xfrm_state_insert(struct xfrm_state *x);
988 extern int xfrm_state_add(struct xfrm_state *x);
989 extern int xfrm_state_update(struct xfrm_state *x);
990 extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family);
991 extern struct xfrm_state *xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family);
992 #ifdef CONFIG_XFRM_SUB_POLICY
993 extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
994 int n, unsigned short family);
995 extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
996 int n, unsigned short family);
998 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
999 int n, unsigned short family)
1004 static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1005 int n, unsigned short family)
1010 extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
1011 extern int xfrm_state_delete(struct xfrm_state *x);
1012 extern void xfrm_state_flush(u8 proto);
1013 extern int xfrm_replay_check(struct xfrm_state *x, u32 seq);
1014 extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
1015 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
1016 extern int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb);
1017 extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1018 extern int xfrm_init_state(struct xfrm_state *x);
1019 extern int xfrm4_rcv(struct sk_buff *skb);
1020 extern int xfrm4_output(struct sk_buff *skb);
1021 extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler);
1022 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler);
1023 extern int xfrm6_rcv_spi(struct sk_buff *skb, u32 spi);
1024 extern int xfrm6_rcv(struct sk_buff **pskb);
1025 extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1026 xfrm_address_t *saddr, u8 proto);
1027 extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler);
1028 extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler);
1029 extern u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
1030 extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
1031 extern u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
1032 extern int xfrm6_output(struct sk_buff *skb);
1033 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1037 extern int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type);
1038 extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
1039 extern int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family);
1041 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1043 return -ENOPROTOOPT;
1046 static inline int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
1048 /* should not happen */
1052 static inline int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family)
1058 struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp);
1059 extern int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), void *);
1060 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1061 struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
1062 struct xfrm_selector *sel,
1063 struct xfrm_sec_ctx *ctx, int delete);
1064 struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete);
1065 void xfrm_policy_flush(u8 type);
1066 u32 xfrm_get_acqseq(void);
1067 void xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1068 struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1069 xfrm_address_t *daddr, xfrm_address_t *saddr,
1070 int create, unsigned short family);
1071 extern void xfrm_policy_flush(u8 type);
1072 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1073 extern int xfrm_flush_bundles(void);
1074 extern void xfrm_flush_all_bundles(void);
1075 extern int xfrm_bundle_ok(struct xfrm_dst *xdst, struct flowi *fl, int family, int strict);
1076 extern void xfrm_init_pmtu(struct dst_entry *dst);
1078 extern wait_queue_head_t km_waitq;
1079 extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
1080 extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
1081 extern int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
1083 extern void xfrm_input_init(void);
1084 extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq);
1086 extern void xfrm_probe_algs(void);
1087 extern int xfrm_count_auth_supported(void);
1088 extern int xfrm_count_enc_supported(void);
1089 extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1090 extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1091 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1092 extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1093 extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1094 extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
1095 extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
1096 extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
1100 typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
1103 extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm,
1104 int offset, int len, icv_update_fn_t icv_update);
1106 static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
1112 return a->a4 - b->a4;
1114 return ipv6_addr_cmp((struct in6_addr *)a,
1115 (struct in6_addr *)b);
1119 static inline int xfrm_policy_id2dir(u32 index)
1124 static inline int xfrm_aevent_is_on(void)
1130 nlsk = rcu_dereference(xfrm_nl);
1132 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1137 static inline void xfrm_aevent_doreplay(struct xfrm_state *x)
1139 if (xfrm_aevent_is_on())
1140 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1144 #endif /* _NET_XFRM_H */