1 /* xfrm_user.c: User interface to configure xfrm engine.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
13 #include <linux/crypto.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
21 #include <linux/skbuff.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/pfkeyv2.h>
24 #include <linux/ipsec.h>
25 #include <linux/init.h>
26 #include <linux/security.h>
29 #include <net/netlink.h>
30 #include <asm/uaccess.h>
/*
 * Validate one algorithm attribute (auth/crypt/comp) from userspace.
 * Checks the attribute payload is large enough for the declared key
 * length, rejects zero-length keys except for the null algorithms, and
 * forces NUL-termination of alg_name before it is used as a C string.
 * NOTE(review): intermediate lines (NULL checks, error returns, switch
 * labels) are elided in this excerpt.
 */
32 static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
34 struct rtattr *rt = xfrma[type - 1];
35 struct xfrm_algo *algp;
/* Payload bytes remaining after the rtattr header and the fixed
 * xfrm_algo header. */
41 len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp);
/* alg_key_len is in bits; round up to whole key bytes. */
47 len -= (algp->alg_key_len + 7U) / 8;
/* Auth: only "digest_null" may carry an empty key. */
53 if (!algp->alg_key_len &&
54 strcmp(algp->alg_name, "digest_null") != 0)
/* Crypt: only "cipher_null" may carry an empty key. */
59 if (!algp->alg_key_len &&
60 strcmp(algp->alg_name, "cipher_null") != 0)
65 /* Zero length keys are legal. */
/* Defensive: guarantee termination regardless of what userspace sent. */
72 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
/*
 * Length-check the optional XFRMA_ENCAP attribute (NAT-T encapsulation
 * template).  NOTE(review): the "attribute absent -> OK" path is elided
 * in this excerpt.
 */
76 static int verify_encap_tmpl(struct rtattr **xfrma)
78 struct rtattr *rt = xfrma[XFRMA_ENCAP - 1];
79 struct xfrm_encap_tmpl *encap;
84 if ((rt->rta_len - sizeof(*rt)) < sizeof(*encap))
/*
 * Length-check an address-carrying attribute (e.g. XFRMA_COADDR /
 * XFRMA_SRCADDR) and, when @addrp is non-NULL, hand back a pointer into
 * the attribute payload.  NOTE(review): NULL-attribute and NULL-addrp
 * handling is elided here.
 */
90 static int verify_one_addr(struct rtattr **xfrma, enum xfrm_attr_type_t type,
91 xfrm_address_t **addrp)
93 struct rtattr *rt = xfrma[type - 1];
98 if ((rt->rta_len - sizeof(*rt)) < sizeof(**addrp))
102 *addrp = RTA_DATA(rt);
/*
 * Validate the XFRMA_SEC_CTX attribute: the rtattr must hold at least a
 * header, and the header's self-declared total length must equal
 * header + ctx_len exactly.
 */
107 static inline int verify_sec_ctx_len(struct rtattr **xfrma)
109 struct rtattr *rt = xfrma[XFRMA_SEC_CTX - 1];
110 struct xfrm_user_sec_ctx *uctx;
116 if (rt->rta_len < sizeof(*uctx))
121 len += sizeof(struct xfrm_user_sec_ctx);
122 len += uctx->ctx_len;
/* Reject under- or over-declared context lengths. */
124 if (uctx->len != len)
/*
 * Sanity-check a new-SA request before any state is allocated: the
 * per-protocol rules below enforce which algorithm attributes must /
 * must not be present, then each present attribute is length-checked.
 * NOTE(review): family checks, the proto case labels and the error
 * returns between the visible lines are elided in this excerpt; the
 * pairing of each attribute set with AH/ESP/IPCOMP is inferred from
 * context and should be confirmed against the full source.
 */
131 static int verify_newsa_info(struct xfrm_usersa_info *p,
132 struct rtattr **xfrma)
142 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
154 switch (p->id.proto) {
/* Auth-only protocol: auth required, crypt/comp forbidden. */
156 if (!xfrma[XFRMA_ALG_AUTH-1] ||
157 xfrma[XFRMA_ALG_CRYPT-1] ||
158 xfrma[XFRMA_ALG_COMP-1])
/* Needs at least one of auth/crypt; comp forbidden. */
163 if ((!xfrma[XFRMA_ALG_AUTH-1] &&
164 !xfrma[XFRMA_ALG_CRYPT-1]) ||
165 xfrma[XFRMA_ALG_COMP-1])
/* Compression-only protocol: comp required, auth/crypt forbidden. */
170 if (!xfrma[XFRMA_ALG_COMP-1] ||
171 xfrma[XFRMA_ALG_AUTH-1] ||
172 xfrma[XFRMA_ALG_CRYPT-1])
/* Length-validate every optional attribute that is present. */
180 if ((err = verify_one_alg(xfrma, XFRMA_ALG_AUTH)))
182 if ((err = verify_one_alg(xfrma, XFRMA_ALG_CRYPT)))
184 if ((err = verify_one_alg(xfrma, XFRMA_ALG_COMP)))
186 if ((err = verify_encap_tmpl(xfrma)))
188 if ((err = verify_sec_ctx_len(xfrma)))
190 if ((err = verify_one_addr(xfrma, XFRMA_COADDR, NULL)))
/* Accepted SA modes. */
195 case XFRM_MODE_TRANSPORT:
196 case XFRM_MODE_TUNNEL:
197 case XFRM_MODE_ROUTEOPTIMIZATION:
/*
 * Copy one algorithm attribute from the netlink message into a freshly
 * kmalloc'd xfrm_algo, resolve it via the supplied lookup function
 * (xfrm_{a,e,c}alg_get_byname with probe=1) and record the resulting
 * SADB algorithm id in *props.  The canonical algorithm name from the
 * descriptor overwrites whatever userspace supplied.
 * NOTE(review): NULL-attribute, lookup-failure and kmalloc-failure
 * paths are elided in this excerpt.
 */
210 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
211 struct xfrm_algo_desc *(*get_byname)(char *, int),
212 struct rtattr *u_arg)
214 struct rtattr *rta = u_arg;
215 struct xfrm_algo *p, *ualg;
216 struct xfrm_algo_desc *algo;
222 ualg = RTA_DATA(rta);
/* probe=1: allow the crypto module to be loaded on demand. */
224 algo = get_byname(ualg->alg_name, 1);
227 *props = algo->desc.sadb_alg_id;
/* Header plus key bytes (alg_key_len is in bits, round up). */
229 len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8;
230 p = kmalloc(len, GFP_KERNEL);
234 memcpy(p, ualg, len);
/* Normalise to the canonical in-kernel algorithm name. */
235 strcpy(p->alg_name, algo->name);
/*
 * Duplicate the optional XFRMA_ENCAP payload into *encapp.
 * NOTE(review): absent-attribute and allocation-failure paths elided.
 */
240 static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_arg)
242 struct rtattr *rta = u_arg;
243 struct xfrm_encap_tmpl *p, *uencap;
248 uencap = RTA_DATA(rta);
249 p = kmalloc(sizeof(*p), GFP_KERNEL);
253 memcpy(p, uencap, sizeof(*p));
/*
 * Bytes needed to export a policy's security context as an
 * XFRMA_SEC_CTX attribute (header + raw context string).
 */
259 static inline int xfrm_user_sec_ctx_size(struct xfrm_policy *xp)
261 struct xfrm_sec_ctx *xfrm_ctx = xp->security;
265 len += sizeof(struct xfrm_user_sec_ctx);
266 len += uctx-less /* see original */
/*
 * Hand the user-supplied security context to the LSM, which allocates
 * and attaches x->security.
 */
271 static int attach_sec_ctx(struct xfrm_state *x, struct rtattr *u_arg)
273 struct xfrm_user_sec_ctx *uctx;
278 uctx = RTA_DATA(u_arg);
279 return security_xfrm_state_alloc(x, uctx);
/*
 * Duplicate an optional address attribute (care-of address) into
 * *addrpp.  NOTE(review): absent-attribute and kmalloc-failure paths
 * elided.
 */
282 static int attach_one_addr(xfrm_address_t **addrpp, struct rtattr *u_arg)
284 struct rtattr *rta = u_arg;
285 xfrm_address_t *p, *uaddrp;
290 uaddrp = RTA_DATA(rta);
291 p = kmalloc(sizeof(*p), GFP_KERNEL);
295 memcpy(p, uaddrp, sizeof(*p));
/*
 * Populate a kernel xfrm_state from the userspace xfrm_usersa_info
 * image: identity, selector and lifetime are copied wholesale, the
 * scalar properties field by field.
 */
300 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
302 memcpy(&x->id, &p->id, sizeof(x->id));
303 memcpy(&x->sel, &p->sel, sizeof(x->sel));
304 memcpy(&x->lft, &p->lft, sizeof(x->lft));
305 x->props.mode = p->mode;
306 x->props.replay_window = p->replay_window;
307 x->props.reqid = p->reqid;
308 x->props.family = p->family;
309 x->props.saddr = p->saddr;
310 x->props.flags = p->flags;
314 * someday when pfkey also has support, we could have the code
315 * somehow made shareable and move it to xfrm_state.c - JHS
/*
 * Apply async-event (AE) attributes to an SA: replay state, current
 * lifetime counters, event timer threshold and replay threshold.  Each
 * attribute is optional; only present ones are applied, and each is
 * length-checked before use.  Caller holds x->lock (see xfrm_new_ae).
 * NOTE(review): the "if (rp) { ... }" style guards around each group
 * are elided in this excerpt.
 */
318 static int xfrm_update_ae_params(struct xfrm_state *x, struct rtattr **xfrma)
321 struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1];
322 struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1];
323 struct rtattr *et = xfrma[XFRMA_ETIMER_THRESH-1];
324 struct rtattr *rt = xfrma[XFRMA_REPLAY_THRESH-1];
327 struct xfrm_replay_state *replay;
328 if (RTA_PAYLOAD(rp) < sizeof(*replay))
330 replay = RTA_DATA(rp);
/* Overwrite both the live and the "previous" replay snapshots. */
331 memcpy(&x->replay, replay, sizeof(*replay));
332 memcpy(&x->preplay, replay, sizeof(*replay));
336 struct xfrm_lifetime_cur *ltime;
337 if (RTA_PAYLOAD(lt) < sizeof(*ltime))
339 ltime = RTA_DATA(lt);
340 x->curlft.bytes = ltime->bytes;
341 x->curlft.packets = ltime->packets;
342 x->curlft.add_time = ltime->add_time;
343 x->curlft.use_time = ltime->use_time;
347 if (RTA_PAYLOAD(et) < sizeof(u32))
349 x->replay_maxage = *(u32*)RTA_DATA(et);
353 if (RTA_PAYLOAD(rt) < sizeof(u32))
355 x->replay_maxdiff = *(u32*)RTA_DATA(rt);
/*
 * Build a fully-initialised xfrm_state from a validated new-SA request:
 * copy the base info, attach optional algorithms / encap template /
 * care-of address / security context, initialise the state machine,
 * seed AE defaults from sysctls and finally let XFRMA_* AE attributes
 * override them.  On any failure the state is marked DEAD so the
 * (elided) cleanup path can release it.
 * NOTE(review): goto-error labels and the cleanup tail are elided in
 * this excerpt.
 */
363 static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
364 struct rtattr **xfrma,
367 struct xfrm_state *x = xfrm_state_alloc();
373 copy_from_user_state(x, p);
375 if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
376 xfrm_aalg_get_byname,
377 xfrma[XFRMA_ALG_AUTH-1])))
379 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
380 xfrm_ealg_get_byname,
381 xfrma[XFRMA_ALG_CRYPT-1])))
383 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
384 xfrm_calg_get_byname,
385 xfrma[XFRMA_ALG_COMP-1])))
387 if ((err = attach_encap_tmpl(&x->encap, xfrma[XFRMA_ENCAP-1])))
389 if ((err = attach_one_addr(&x->coaddr, xfrma[XFRMA_COADDR-1])))
391 err = xfrm_init_state(x);
395 if ((err = attach_sec_ctx(x, xfrma[XFRMA_SEC_CTX-1])))
/* AE defaults from sysctls; may be overridden just below. */
399 x->replay_maxdiff = sysctl_xfrm_aevent_rseqth;
400 /* sysctl_xfrm_aevent_etime is in 100ms units */
401 x->replay_maxage = (sysctl_xfrm_aevent_etime*HZ)/XFRM_AE_ETH_M;
402 x->preplay.bitmap = 0;
403 x->preplay.seq = x->replay.seq+x->replay_maxdiff;
404 x->preplay.oseq = x->replay.oseq +x->replay_maxdiff;
406 /* override default values from above */
408 err = xfrm_update_ae_params(x, (struct rtattr **)xfrma);
/* Error path: flag the half-built state for destruction. */
415 x->km.state = XFRM_STATE_DEAD;
/*
 * Netlink doit handler for XFRM_MSG_NEWSA / XFRM_MSG_UPDSA: validate
 * the request, construct the state, insert or update it, and broadcast
 * a km_state_notify event carrying the originating seq/pid/type.
 * NOTE(review): error returns and the failure path after
 * xfrm_state_add/update are elided in this excerpt.
 */
422 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
424 struct xfrm_usersa_info *p = NLMSG_DATA(nlh);
425 struct xfrm_state *x;
429 err = verify_newsa_info(p, (struct rtattr **)xfrma);
433 x = xfrm_state_construct(p, (struct rtattr **)xfrma, &err);
/* NEWSA inserts (fails on duplicate); UPDSA replaces. */
438 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
439 err = xfrm_state_add(x);
441 err = xfrm_state_update(x);
/* Insert/update failed: mark DEAD so the put below frees it. */
444 x->km.state = XFRM_STATE_DEAD;
449 c.seq = nlh->nlmsg_seq;
450 c.pid = nlh->nlmsg_pid;
451 c.event = nlh->nlmsg_type;
453 km_state_notify(x, &c);
/*
 * Resolve an xfrm_usersa_id to a state.  Protocols with a real SPI are
 * looked up by (daddr, spi, proto, family); others need the
 * XFRMA_SRCADDR attribute and are looked up by address pair.
 * NOTE(review): the not-found and verify-failure paths are elided.
 */
459 static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p,
460 struct rtattr **xfrma,
463 struct xfrm_state *x = NULL;
466 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
468 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
470 xfrm_address_t *saddr = NULL;
/* Mandatory source address for SPI-less protocols. */
472 err = verify_one_addr(xfrma, XFRMA_SRCADDR, &saddr);
481 x = xfrm_state_lookup_byaddr(&p->daddr, saddr, p->proto,
/*
 * Netlink doit handler for XFRM_MSG_DELSA: look the SA up, let the LSM
 * veto the deletion, refuse to delete kernel-held states, delete, then
 * broadcast the event.  NOTE(review): error gotos and the reference
 * drop are elided in this excerpt.
 */
491 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
493 struct xfrm_state *x;
496 struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
498 x = xfrm_user_state_lookup(p, (struct rtattr **)xfrma, &err);
502 if ((err = security_xfrm_state_delete(x)) != 0)
/* States referenced by the kernel itself must not be deleted. */
505 if (xfrm_state_kern(x)) {
510 err = xfrm_state_delete(x);
514 c.seq = nlh->nlmsg_seq;
515 c.pid = nlh->nlmsg_pid;
516 c.event = nlh->nlmsg_type;
517 km_state_notify(x, &c);
/*
 * Inverse of copy_from_user_state: serialise a kernel xfrm_state into
 * the userspace xfrm_usersa_info layout, including current lifetime
 * and statistics.
 */
524 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
526 memcpy(&p->id, &x->id, sizeof(p->id));
527 memcpy(&p->sel, &x->sel, sizeof(p->sel));
528 memcpy(&p->lft, &x->lft, sizeof(p->lft));
529 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
530 memcpy(&p->stats, &x->stats, sizeof(p->stats));
531 p->saddr = x->props.saddr;
532 p->mode = x->props.mode;
533 p->replay_window = x->props.replay_window;
534 p->reqid = x->props.reqid;
535 p->family = x->props.family;
536 p->flags = x->props.flags;
/* Shared cursor/context for SA and policy dump walkers; also reused by
 * the single-object xfrm_{state,policy}_netlink serialisers. */
540 struct xfrm_dump_info {
541 struct sk_buff *in_skb;
542 struct sk_buff *out_skb;
/*
 * xfrm_state_walk callback: serialise one SA into sp->out_skb as an
 * XFRM_MSG_NEWSA message, appending optional attributes (algorithms,
 * encap template, security context, care-of address) when present.
 * Skips entries before the dump's start index.  On overflow the
 * rtattr_failure path trims the skb back to @b.
 * NOTE(review): the "if (x->aalg)"-style guards, NLMSG_DATA assignment
 * and the return statements are elided in this excerpt.
 */
549 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
551 struct xfrm_dump_info *sp = ptr;
552 struct sk_buff *in_skb = sp->in_skb;
553 struct sk_buff *skb = sp->out_skb;
554 struct xfrm_usersa_info *p;
555 struct nlmsghdr *nlh;
/* Remember where this message starts so we can roll back on failure. */
556 unsigned char *b = skb->tail;
558 if (sp->this_idx < sp->start_idx)
561 nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
563 XFRM_MSG_NEWSA, sizeof(*p));
564 nlh->nlmsg_flags = sp->nlmsg_flags;
567 copy_to_user_state(x, p);
/* Algorithm attributes carry the key bytes after the header. */
570 RTA_PUT(skb, XFRMA_ALG_AUTH,
571 sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg);
573 RTA_PUT(skb, XFRMA_ALG_CRYPT,
574 sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg);
576 RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
579 RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
/* Security context: header followed by the raw context string. */
582 int ctx_size = sizeof(struct xfrm_sec_ctx) +
583 x->security->ctx_len;
584 struct rtattr *rt = __RTA_PUT(skb, XFRMA_SEC_CTX, ctx_size);
585 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
587 uctx->exttype = XFRMA_SEC_CTX;
588 uctx->len = ctx_size;
589 uctx->ctx_doi = x->security->ctx_doi;
590 uctx->ctx_alg = x->security->ctx_alg;
591 uctx->ctx_len = x->security->ctx_len;
592 memcpy(uctx + 1, x->security->ctx_str, x->security->ctx_len);
596 RTA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
598 nlh->nlmsg_len = skb->tail - b;
/* nlmsg/rtattr overflow: discard the partial message. */
605 skb_trim(skb, b - skb->data);
/*
 * Netlink dump handler for XFRM_MSG_GETSA: walk all SAs, resuming from
 * cb->args[0] and saving the cursor back for the next invocation.
 */
609 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
611 struct xfrm_dump_info info;
613 info.in_skb = cb->skb;
615 info.nlmsg_seq = cb->nlh->nlmsg_seq;
616 info.nlmsg_flags = NLM_F_MULTI;
618 info.start_idx = cb->args[0];
619 (void) xfrm_state_walk(0, dump_one_state, &info);
620 cb->args[0] = info.this_idx;
/*
 * Serialise a single SA into a freshly allocated skb (for a unicast
 * reply), reusing dump_one_state with a zeroed cursor.  Returns an
 * ERR_PTR on allocation or serialisation failure.
 * NOTE(review): the failure branch inside the if and the final return
 * are elided in this excerpt.
 */
625 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
626 struct xfrm_state *x, u32 seq)
628 struct xfrm_dump_info info;
/* GFP_ATOMIC: may be called from contexts that cannot sleep. */
631 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
633 return ERR_PTR(-ENOMEM);
635 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
636 info.in_skb = in_skb;
638 info.nlmsg_seq = seq;
639 info.nlmsg_flags = 0;
640 info.this_idx = info.start_idx = 0;
642 if (dump_one_state(x, 0, &info)) {
/*
 * Netlink doit handler for XFRM_MSG_GETSA (non-dump form): look the SA
 * up, serialise it and unicast the reply to the requester.
 * NOTE(review): the not-found path and the xfrm_state_put are elided.
 */
650 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
652 struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
653 struct xfrm_state *x;
654 struct sk_buff *resp_skb;
657 x = xfrm_user_state_lookup(p, (struct rtattr **)xfrma, &err);
661 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
662 if (IS_ERR(resp_skb)) {
663 err = PTR_ERR(resp_skb);
665 err = netlink_unicast(xfrm_nl, resp_skb,
666 NETLINK_CB(skb).pid, MSG_DONTWAIT);
/*
 * Validate an SPI-allocation request; for IPCOMP the CPI is only
 * 16 bits wide, so the requested range must fit below 0x10000.
 * NOTE(review): the proto case labels and min<=max check (if any) are
 * elided in this excerpt.
 */
673 static int verify_userspi_info(struct xfrm_userspi_info *p)
675 switch (p->info.id.proto) {
681 /* IPCOMP spi is 16-bits. */
682 if (p->max >= 0x10000)
/*
 * Netlink doit handler for XFRM_MSG_ALLOCSPI: find (by seq) or create
 * (acquire) the larval SA, allocate an SPI from [min, max] under the
 * state lock, and unicast the resulting SA back to the caller.
 * NOTE(review): error gotos, the xfrm_state_put and the seq-mismatch
 * handling are elided in this excerpt.
 */
696 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
698 struct xfrm_state *x;
699 struct xfrm_userspi_info *p;
700 struct sk_buff *resp_skb;
701 xfrm_address_t *daddr;
706 err = verify_userspi_info(p);
710 family = p->info.family;
711 daddr = &p->info.id.daddr;
/* Prefer the acquire matching the caller's seq, but only if its
 * destination agrees with the request. */
715 x = xfrm_find_acq_byseq(p->info.seq);
716 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
723 x = xfrm_find_acq(p->info.mode, p->info.reqid,
724 p->info.id.proto, daddr,
731 resp_skb = ERR_PTR(-ENOENT);
733 spin_lock_bh(&x->lock);
734 if (x->km.state != XFRM_STATE_DEAD) {
/* min/max arrive host-order; allocator wants network order. */
735 xfrm_alloc_spi(x, htonl(p->min), htonl(p->max));
737 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
739 spin_unlock_bh(&x->lock);
741 if (IS_ERR(resp_skb)) {
742 err = PTR_ERR(resp_skb);
746 err = netlink_unicast(xfrm_nl, resp_skb,
747 NETLINK_CB(skb).pid, MSG_DONTWAIT);
/*
 * Accept only the known policy directions.  NOTE(review): the IN case
 * and default/error return are elided in this excerpt.
 */
755 static int verify_policy_dir(__u8 dir)
759 case XFRM_POLICY_OUT:
760 case XFRM_POLICY_FWD:
/*
 * Validate a new-policy request: share mode, action, selector family
 * (IPv6 only when compiled in) and finally the direction.
 * NOTE(review): the switch headers, ANY/ALLOW cases and error returns
 * between the visible lines are elided in this excerpt.
 */
770 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
774 case XFRM_SHARE_SESSION:
775 case XFRM_SHARE_USER:
776 case XFRM_SHARE_UNIQUE:
784 case XFRM_POLICY_ALLOW:
785 case XFRM_POLICY_BLOCK:
792 switch (p->sel.family) {
797 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* IPv6 selector on an IPv4-only kernel. */
800 return -EAFNOSUPPORT;
807 return verify_policy_dir(p->dir);
/*
 * Attach the optional XFRMA_SEC_CTX attribute to a policy via the LSM.
 * NOTE(review): the absent-attribute early return is elided.
 */
810 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct rtattr **xfrma)
812 struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
813 struct xfrm_user_sec_ctx *uctx;
819 return security_xfrm_policy_alloc(pol, uctx);
/*
 * Copy @nr user templates into the policy's xfrm_vec, field by field.
 * NOTE(review): the mode/family assignments between the visible lines
 * are elided in this excerpt.
 */
822 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
828 for (i = 0; i < nr; i++, ut++) {
829 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
831 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
832 memcpy(&t->saddr, &ut->saddr,
833 sizeof(xfrm_address_t));
834 t->reqid = ut->reqid;
836 t->share = ut->share;
837 t->optional = ut->optional;
838 t->aalgos = ut->aalgos;
839 t->ealgos = ut->ealgos;
840 t->calgos = ut->calgos;
/*
 * Parse the optional XFRMA_TMPL attribute: derive the template count
 * from the attribute length and bound it by XFRM_MAX_DEPTH before
 * copying.  NOTE(review): absent-attribute handling elided.
 */
844 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **xfrma)
846 struct rtattr *rt = xfrma[XFRMA_TMPL-1];
847 struct xfrm_user_tmpl *utmpl;
853 nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl);
855 if (nr > XFRM_MAX_DEPTH)
858 copy_templates(pol, RTA_DATA(rt), nr);
/*
 * Fill a kernel policy from its userspace image; the policy family is
 * taken from the selector.  Share mode is deliberately not copied yet
 * (see the XXX below).
 */
863 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
865 xp->priority = p->priority;
866 xp->index = p->index;
867 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
868 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
869 xp->action = p->action;
870 xp->flags = p->flags;
871 xp->family = p->sel.family;
872 /* XXX xp->share = p->share; */
/*
 * Inverse direction: serialise a kernel policy into the userspace
 * layout; share is reported as ANY until sharing is implemented.
 */
875 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
877 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
878 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
879 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
880 p->priority = xp->priority;
881 p->index = xp->index;
882 p->sel.family = xp->family;
884 p->action = xp->action;
885 p->flags = xp->flags;
886 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
/*
 * Allocate a policy and populate it from the userspace info plus the
 * optional template and security-context attributes.
 * NOTE(review): allocation-failure handling, *errp propagation and the
 * error cleanup are elided in this excerpt.
 */
889 static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct rtattr **xfrma, int *errp)
891 struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);
899 copy_from_user_policy(xp, p);
901 if (!(err = copy_from_user_tmpl(xp, xfrma)))
902 err = copy_from_user_sec_ctx(xp, xfrma);
/*
 * Netlink doit handler for XFRM_MSG_NEWPOLICY / XFRM_MSG_UPDPOLICY:
 * validate, construct, insert (exclusively for NEWPOLICY) and notify.
 * NOTE(review): error returns and the final policy reference drop are
 * elided in this excerpt.
 */
913 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
915 struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh);
916 struct xfrm_policy *xp;
921 err = verify_newpolicy_info(p);
924 err = verify_sec_ctx_len((struct rtattr **)xfrma);
928 xp = xfrm_policy_construct(p, (struct rtattr **)xfrma, &err);
932 /* shouldnt excl be based on nlh flags??
933 * Aha! this is anti-netlink really i.e more pfkey derived
934 * in netlink excl is a flag and you wouldnt need
935 * a type XFRM_MSG_UPDPOLICY - JHS */
936 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
937 err = xfrm_policy_insert(p->dir, xp, excl);
/* Insert failed: release the LSM context before freeing. */
939 security_xfrm_policy_free(xp);
944 c.event = nlh->nlmsg_type;
945 c.seq = nlh->nlmsg_seq;
946 c.pid = nlh->nlmsg_pid;
947 km_policy_notify(xp, p->dir, &c);
/*
 * Emit the policy's templates as one XFRMA_TMPL attribute.  Builds the
 * userspace-layout vector on the stack (bounded by XFRM_MAX_DEPTH)
 * before a single RTA_PUT.  NOTE(review): the mode assignment and the
 * rtattr_failure return are elided in this excerpt.
 */
954 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
956 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
/* Nothing to emit for a template-less policy. */
959 if (xp->xfrm_nr == 0)
962 for (i = 0; i < xp->xfrm_nr; i++) {
963 struct xfrm_user_tmpl *up = &vec[i];
964 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
966 memcpy(&up->id, &kp->id, sizeof(up->id));
967 up->family = xp->family;
968 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
969 up->reqid = kp->reqid;
971 up->share = kp->share;
972 up->optional = kp->optional;
973 up->aalgos = kp->aalgos;
974 up->ealgos = kp->ealgos;
975 up->calgos = kp->calgos;
977 RTA_PUT(skb, XFRMA_TMPL,
978 (sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr),
/*
 * Emit a security context as an XFRMA_SEC_CTX attribute: fixed header
 * followed by the raw context string.  Mirrors the inline code in
 * dump_one_state.
 */
987 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
989 int ctx_size = sizeof(struct xfrm_sec_ctx) + s->ctx_len;
990 struct rtattr *rt = __RTA_PUT(skb, XFRMA_SEC_CTX, ctx_size);
991 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
993 uctx->exttype = XFRMA_SEC_CTX;
994 uctx->len = ctx_size;
995 uctx->ctx_doi = s->ctx_doi;
996 uctx->ctx_alg = s->ctx_alg;
997 uctx->ctx_len = s->ctx_len;
998 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
/* Emit the SA's security context, if any (absent-context path elided). */
1005 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1008 return copy_sec_ctx(x->security, skb);
/* Emit the policy's security context, if any (absent-context path elided). */
1013 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1016 return copy_sec_ctx(xp->security, skb);
/*
 * xfrm_policy_walk callback: serialise one policy into sp->out_skb as
 * an XFRM_MSG_NEWPOLICY message with its templates and security
 * context; rolls the skb back to @b on overflow.
 * NOTE(review): index bookkeeping, sequencing arguments to NLMSG_PUT
 * and the return statements are elided in this excerpt.
 */
1021 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1023 struct xfrm_dump_info *sp = ptr;
1024 struct xfrm_userpolicy_info *p;
1025 struct sk_buff *in_skb = sp->in_skb;
1026 struct sk_buff *skb = sp->out_skb;
1027 struct nlmsghdr *nlh;
/* Rollback point in case the message does not fit. */
1028 unsigned char *b = skb->tail;
1030 if (sp->this_idx < sp->start_idx)
1033 nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
1035 XFRM_MSG_NEWPOLICY, sizeof(*p));
1036 p = NLMSG_DATA(nlh);
1037 nlh->nlmsg_flags = sp->nlmsg_flags;
1039 copy_to_user_policy(xp, p, dir);
1040 if (copy_to_user_tmpl(xp, skb) < 0)
1042 if (copy_to_user_sec_ctx(xp, skb))
1045 nlh->nlmsg_len = skb->tail - b;
/* Overflow: discard the partial message. */
1051 skb_trim(skb, b - skb->data);
/*
 * Netlink dump handler for XFRM_MSG_GETPOLICY: walk all policies,
 * resuming from and updating the cursor in cb->args[0].
 */
1055 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1057 struct xfrm_dump_info info;
1059 info.in_skb = cb->skb;
1061 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1062 info.nlmsg_flags = NLM_F_MULTI;
1064 info.start_idx = cb->args[0];
1065 (void) xfrm_policy_walk(dump_one_policy, &info);
1066 cb->args[0] = info.this_idx;
/*
 * Serialise a single policy into a freshly allocated skb for a unicast
 * reply, reusing dump_one_policy with a zeroed cursor.  Returns an
 * ERR_PTR on failure.  NOTE(review): the failure branch and final
 * return are elided in this excerpt.
 */
1071 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1072 struct xfrm_policy *xp,
1075 struct xfrm_dump_info info;
1076 struct sk_buff *skb;
/* GFP_KERNEL: process context (contrast xfrm_state_netlink's ATOMIC). */
1078 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1080 return ERR_PTR(-ENOMEM);
1082 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
1083 info.in_skb = in_skb;
1085 info.nlmsg_seq = seq;
1086 info.nlmsg_flags = 0;
1087 info.this_idx = info.start_idx = 0;
1089 if (dump_one_policy(xp, dir, 0, &info) < 0) {
/*
 * Netlink doit handler shared by XFRM_MSG_GETPOLICY (non-dump) and
 * XFRM_MSG_DELPOLICY.  Looks the policy up by index, or — when no
 * index is given — by selector plus an optional security context built
 * into a temporary policy for the comparison.  GET unicasts the
 * serialised policy; DEL additionally runs the LSM delete hook and
 * broadcasts the event.  NOTE(review): error gotos, the not-found path
 * and the reference drop are elided in this excerpt.
 */
1097 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
1099 struct xfrm_policy *xp;
1100 struct xfrm_userpolicy_id *p;
1105 p = NLMSG_DATA(nlh);
1106 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1108 err = verify_policy_dir(p->dir);
/* Index-based lookup; the branch below handles selector lookup. */
1113 xp = xfrm_policy_byid(p->dir, p->index, delete);
1115 struct rtattr **rtattrs = (struct rtattr **)xfrma;
1116 struct rtattr *rt = rtattrs[XFRMA_SEC_CTX-1];
/* Stack-temporary policy used only to carry tmp.security for the
 * selector+context lookup. */
1117 struct xfrm_policy tmp;
1119 err = verify_sec_ctx_len(rtattrs);
1123 memset(&tmp, 0, sizeof(struct xfrm_policy));
1125 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
1127 if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
1130 xp = xfrm_policy_bysel_ctx(p->dir, &p->sel, tmp.security, delete);
1131 security_xfrm_policy_free(&tmp);
1137 struct sk_buff *resp_skb;
1139 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1140 if (IS_ERR(resp_skb)) {
1141 err = PTR_ERR(resp_skb);
1143 err = netlink_unicast(xfrm_nl, resp_skb,
1144 NETLINK_CB(skb).pid,
/* Delete path: LSM may veto; on success broadcast the event. */
1148 if ((err = security_xfrm_policy_delete(xp)) != 0)
1150 c.data.byid = p->index;
1151 c.event = nlh->nlmsg_type;
1152 c.seq = nlh->nlmsg_seq;
1153 c.pid = nlh->nlmsg_pid;
1154 km_policy_notify(xp, p->dir, &c);
/*
 * Netlink doit handler for XFRM_MSG_FLUSHSA: flush all SAs of the
 * given protocol and broadcast a flush notification.
 */
1163 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
1166 struct xfrm_usersa_flush *p = NLMSG_DATA(nlh);
1168 xfrm_state_flush(p->proto);
1169 c.data.proto = p->proto;
1170 c.event = nlh->nlmsg_type;
1171 c.seq = nlh->nlmsg_seq;
1172 c.pid = nlh->nlmsg_pid;
/* NULL state: the notification describes the flush, not one SA. */
1173 km_state_notify(NULL, &c);
/*
 * Serialise an async-event (XFRM_MSG_NEWAE) message for SA @x: the SA
 * id, the replay state and current lifetime always; the replay and
 * expiry-timer thresholds only when the corresponding XFRM_AE_* flag
 * is set in c->data.aevent.  Rolls the skb back on overflow.
 * NOTE(review): return statements and the failure labels are elided in
 * this excerpt.
 */
1179 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1181 struct xfrm_aevent_id *id;
1182 struct nlmsghdr *nlh;
1183 struct xfrm_lifetime_cur ltime;
/* Rollback point for nlmsg/rtattr overflow. */
1184 unsigned char *b = skb->tail;
1186 nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id));
1187 id = NLMSG_DATA(nlh);
1188 nlh->nlmsg_flags = 0;
1190 id->sa_id.daddr = x->id.daddr;
1191 id->sa_id.spi = x->id.spi;
1192 id->sa_id.family = x->props.family;
1193 id->sa_id.proto = x->id.proto;
1194 id->flags = c->data.aevent;
1196 RTA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
/* Snapshot current lifetime counters into the userspace layout. */
1198 ltime.bytes = x->curlft.bytes;
1199 ltime.packets = x->curlft.packets;
1200 ltime.add_time = x->curlft.add_time;
1201 ltime.use_time = x->curlft.use_time;
1203 RTA_PUT(skb, XFRMA_LTIME_VAL, sizeof(struct xfrm_lifetime_cur), &ltime);
1205 if (id->flags&XFRM_AE_RTHR) {
1206 RTA_PUT(skb,XFRMA_REPLAY_THRESH,sizeof(u32),&x->replay_maxdiff);
1209 if (id->flags&XFRM_AE_ETHR) {
/* Convert jiffies back to the 100ms units userspace configured. */
1210 u32 etimer = x->replay_maxage*10/HZ;
1211 RTA_PUT(skb,XFRMA_ETIMER_THRESH,sizeof(u32),&etimer);
1214 nlh->nlmsg_len = skb->tail - b;
1219 skb_trim(skb, b - skb->data);
/*
 * Netlink doit handler for XFRM_MSG_GETAE: precompute the reply size
 * (base + replay + lifetime + optional thresholds), allocate the skb,
 * look the SA up, build the aevent under the state lock and unicast
 * it.  NOTE(review): allocation-failure, lookup-failure and build-
 * failure paths are elided in this excerpt.
 */
1223 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
1225 struct xfrm_state *x;
1226 struct sk_buff *r_skb;
1229 struct xfrm_aevent_id *p = NLMSG_DATA(nlh);
1230 int len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1231 struct xfrm_usersa_id *id = &p->sa_id;
1233 len += RTA_SPACE(sizeof(struct xfrm_replay_state));
1234 len += RTA_SPACE(sizeof(struct xfrm_lifetime_cur));
1236 if (p->flags&XFRM_AE_RTHR)
1237 len+=RTA_SPACE(sizeof(u32));
1239 if (p->flags&XFRM_AE_ETHR)
1240 len+=RTA_SPACE(sizeof(u32));
1242 r_skb = alloc_skb(len, GFP_ATOMIC);
1246 x = xfrm_state_lookup(&id->daddr, id->spi, id->proto, id->family);
1253 * XXX: is this lock really needed - none of the other
1254 * gets lock (the concern is things getting updated
1255 * while we are still reading) - jhs
1257 spin_lock_bh(&x->lock);
1258 c.data.aevent = p->flags;
1259 c.seq = nlh->nlmsg_seq;
1260 c.pid = nlh->nlmsg_pid;
1262 if (build_aevent(r_skb, x, &c) < 0)
1264 err = netlink_unicast(xfrm_nl, r_skb,
1265 NETLINK_CB(skb).pid, MSG_DONTWAIT);
1266 spin_unlock_bh(&x->lock);
/*
 * Netlink doit handler for XFRM_MSG_NEWAE: update an SA's async-event
 * parameters.  Requires NLM_F_REPLACE and a VALID state; the actual
 * updates run under the state lock via xfrm_update_ae_params.  On
 * success broadcasts an XFRM_AE_CU (current-update) event.
 * NOTE(review): error returns and the state reference drop are elided;
 * rp/lt are presumably checked for presence in elided lines.
 */
1271 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
1273 struct xfrm_state *x;
1276 struct xfrm_aevent_id *p = NLMSG_DATA(nlh);
1277 struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1];
1278 struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1];
1283 /* pedantic mode - thou shalt sayeth replaceth */
1284 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1287 x = xfrm_state_lookup(&p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1291 if (x->km.state != XFRM_STATE_VALID)
1294 spin_lock_bh(&x->lock);
1295 err = xfrm_update_ae_params(x,(struct rtattr **)xfrma);
1296 spin_unlock_bh(&x->lock);
1300 c.event = nlh->nlmsg_type;
1301 c.seq = nlh->nlmsg_seq;
1302 c.pid = nlh->nlmsg_pid;
1303 c.data.aevent = XFRM_AE_CU;
1304 km_state_notify(x, &c);
/*
 * Netlink doit handler for XFRM_MSG_FLUSHPOLICY: flush all policies
 * and broadcast the notification (NULL policy = flush event).
 */
1311 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
1315 xfrm_policy_flush();
1316 c.event = nlh->nlmsg_type;
1317 c.seq = nlh->nlmsg_seq;
1318 c.pid = nlh->nlmsg_pid;
1319 km_policy_notify(NULL, 0, &c);
/*
 * Netlink doit handler for XFRM_MSG_POLEXPIRE: locate the policy (by
 * index, or by selector + optional security context), then signal its
 * expiry.  A hard expire deletes the policy; a soft expire currently
 * only logs (see printk below).  NOTE(review): the not-found path,
 * hard/soft branch structure and reference drop are elided in this
 * excerpt.
 */
1323 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
1325 struct xfrm_policy *xp;
1326 struct xfrm_user_polexpire *up = NLMSG_DATA(nlh);
1327 struct xfrm_userpolicy_info *p = &up->pol;
/* 0 = look up without deleting. */
1331 xp = xfrm_policy_byid(p->dir, p->index, 0);
1333 struct rtattr **rtattrs = (struct rtattr **)xfrma;
1334 struct rtattr *rt = rtattrs[XFRMA_SEC_CTX-1];
/* Stack-temporary policy carrying only the LSM context for the
 * selector lookup (same pattern as xfrm_get_policy). */
1335 struct xfrm_policy tmp;
1337 err = verify_sec_ctx_len(rtattrs);
1341 memset(&tmp, 0, sizeof(struct xfrm_policy));
1343 struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);
1345 if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
1348 xp = xfrm_policy_bysel_ctx(p->dir, &p->sel, tmp.security, 0);
1349 security_xfrm_policy_free(&tmp);
1354 read_lock(&xp->lock);
1356 read_unlock(&xp->lock);
1360 read_unlock(&xp->lock);
/* Hard expiry: remove the policy. */
1363 xfrm_policy_delete(xp, p->dir);
1365 // reset the timers here?
1366 printk("Dont know what to do with soft policy expire\n");
1368 km_policy_expired(xp, p->dir, up->hard, current->pid);
/*
 * Netlink doit handler for XFRM_MSG_EXPIRE: look the SA up, and if it
 * is still VALID signal the expiry to key managers; a hard expire also
 * deletes the state.  All under the state lock.
 * NOTE(review): the not-found path, hard/soft branching and reference
 * drop are elided in this excerpt.
 */
1375 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
1377 struct xfrm_state *x;
1379 struct xfrm_user_expire *ue = NLMSG_DATA(nlh);
1380 struct xfrm_usersa_info *p = &ue->state;
1382 x = xfrm_state_lookup(&p->id.daddr, p->id.spi, p->id.proto, p->family);
1390 spin_lock_bh(&x->lock);
1391 if (x->km.state != XFRM_STATE_VALID)
1393 km_state_expired(x, ue->hard, current->pid);
/* Hard expiry: drop the state (lock already held, hence __ variant). */
1396 __xfrm_state_delete(x);
1398 spin_unlock_bh(&x->lock);
/*
 * Netlink doit handler for XFRM_MSG_ACQUIRE: build a larval state and
 * a policy from the userspace acquire message, then issue km_query for
 * each policy template so key managers can negotiate keys.
 * NOTE(review): error cleanup, the tmpl-presence check on @rt and the
 * final reference handling are elided in this excerpt.
 */
1403 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
1405 struct xfrm_policy *xp;
1406 struct xfrm_user_tmpl *ut;
1408 struct rtattr *rt = xfrma[XFRMA_TMPL-1];
1410 struct xfrm_user_acquire *ua = NLMSG_DATA(nlh);
1411 struct xfrm_state *x = xfrm_state_alloc();
1417 err = verify_newpolicy_info(&ua->policy);
1419 printk("BAD policy passed\n");
1425 xp = xfrm_policy_construct(&ua->policy, (struct rtattr **) xfrma, &err); if (!xp) {
/* Seed the larval state from the acquire message. */
1430 memcpy(&x->id, &ua->id, sizeof(ua->id));
1431 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
1432 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
1435 /* extract the templates and for each call km_key */
1436 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
1437 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
/* Per-template overrides of the state's identity/properties. */
1438 memcpy(&x->id, &t->id, sizeof(x->id));
1439 x->props.mode = t->mode;
1440 x->props.reqid = t->reqid;
1441 x->props.family = ut->family;
1442 t->aalgos = ua->aalgos;
1443 t->ealgos = ua->ealgos;
1444 t->calgos = ua->calgos;
1445 err = km_query(x, t, xp);
/* Minimum netlink message length (header + fixed payload) for each
 * XFRM message type; enforced in xfrm_user_rcv_msg before attribute
 * parsing.  FLUSHPOLICY carries no fixed payload. */
1456 #define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type))
1458 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
1459 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1460 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1461 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1462 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1463 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1464 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1465 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
1466 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
1467 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
1468 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1469 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1470 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
1471 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
1472 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = NLMSG_LENGTH(0),
1473 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1474 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
/* Dispatch table mapping each XFRM message type to its doit handler
 * and, for GETSA/GETPOLICY, the dump handler.  Note DELPOLICY and
 * GETPOLICY deliberately share xfrm_get_policy (it branches on the
 * message type), as NEWSA/UPDSA share xfrm_add_sa. */
1479 static struct xfrm_link {
1480 int (*doit)(struct sk_buff *, struct nlmsghdr *, void **);
1481 int (*dump)(struct sk_buff *, struct netlink_callback *);
1482 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
1483 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1484 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
1485 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
1486 .dump = xfrm_dump_sa },
1487 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1488 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
1489 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
1490 .dump = xfrm_dump_policy },
1491 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
1492 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
1493 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
1494 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1495 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1496 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
1497 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
1498 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
1499 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
1500 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
/*
 * Top-level receive path for one netlink message: validate the type,
 * enforce CAP_NET_ADMIN for every operation, start a dump for
 * GETSA/GETPOLICY with NLM_F_DUMP, otherwise parse the trailing
 * rtattrs into the 1-indexed xfrma[] array and dispatch to the doit
 * handler.  NOTE(review): several returns and error assignments
 * between the visible lines are elided in this excerpt.
 */
1503 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
1505 struct rtattr *xfrma[XFRMA_MAX];
1506 struct xfrm_link *link;
1509 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
1512 type = nlh->nlmsg_type;
1514 /* A control message: ignore them */
1515 if (type < XFRM_MSG_BASE)
1518 /* Unknown message: reply with EINVAL */
1519 if (type > XFRM_MSG_MAX)
1522 type -= XFRM_MSG_BASE;
1523 link = &xfrm_dispatch[type];
1525 /* All operations require privileges, even GET */
1526 if (security_netlink_recv(skb, CAP_NET_ADMIN)) {
1531 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
1532 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
1533 (nlh->nlmsg_flags & NLM_F_DUMP)) {
1534 if (link->dump == NULL)
1537 if ((*errp = netlink_dump_start(xfrm_nl, skb, nlh,
1538 link->dump, NULL)) != 0) {
/* Dump started: the message is consumed out of band. */
1542 netlink_queue_skip(nlh, skb);
1546 memset(xfrma, 0, sizeof(xfrma));
1548 if (nlh->nlmsg_len < (min_len = xfrm_msg_min[type]))
/* Parse attributes that follow the fixed payload. */
1551 if (nlh->nlmsg_len > min_len) {
1552 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
1553 struct rtattr *attr = (void *) nlh + NLMSG_ALIGN(min_len);
1555 while (RTA_OK(attr, attrlen)) {
1556 unsigned short flavor = attr->rta_type;
1558 if (flavor > XFRMA_MAX)
/* Attribute type N is stored at index N-1 throughout this file. */
1560 xfrma[flavor - 1] = attr;
1562 attr = RTA_NEXT(attr, attrlen);
1566 if (link->doit == NULL)
1568 *errp = link->doit(skb, nlh, (void **) &xfrma);
/*
 * Socket data-ready callback: drain the xfrm netlink queue, fully
 * serialised by xfrm_cfg_mutex.
 */
1577 static void xfrm_netlink_rcv(struct sock *sk, int len)
1579 unsigned int qlen = 0;
1582 mutex_lock(&xfrm_cfg_mutex);
1583 netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg);
1584 mutex_unlock(&xfrm_cfg_mutex);
/*
 * Serialise an XFRM_MSG_EXPIRE message for SA @x; ue->hard is
 * normalised to 0/1 from the event data.  Rolls back on overflow.
 * NOTE(review): the return statements are elided in this excerpt.
 */
1589 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1591 struct xfrm_user_expire *ue;
1592 struct nlmsghdr *nlh;
1593 unsigned char *b = skb->tail;
1595 nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_EXPIRE,
1597 ue = NLMSG_DATA(nlh);
1598 nlh->nlmsg_flags = 0;
1600 copy_to_user_state(x, &ue->state);
1601 ue->hard = (c->data.hard != 0) ? 1 : 0;
1603 nlh->nlmsg_len = skb->tail - b;
1607 skb_trim(skb, b - skb->data);
/*
 * Broadcast an SA expire event to the XFRMNLGRP_EXPIRE multicast
 * group.  NOTE(review): alloc-failure and build-failure returns are
 * elided in this excerpt.
 */
1611 static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
1613 struct sk_buff *skb;
1614 int len = NLMSG_LENGTH(sizeof(struct xfrm_user_expire));
1616 skb = alloc_skb(len, GFP_ATOMIC);
1620 if (build_expire(skb, x, c) < 0)
1623 NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE;
1624 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
/*
 * Broadcast an async event (replay/lifetime update) to the
 * XFRMNLGRP_AEVENTS group.  Size covers the base message plus the
 * always-present replay and lifetime attributes; the optional
 * thresholds are not broadcast here (contrast xfrm_get_ae).
 */
1627 static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
1629 struct sk_buff *skb;
1630 int len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1632 len += RTA_SPACE(sizeof(struct xfrm_replay_state));
1633 len += RTA_SPACE(sizeof(struct xfrm_lifetime_cur));
1634 skb = alloc_skb(len, GFP_ATOMIC);
1638 if (build_aevent(skb, x, c) < 0)
1641 NETLINK_CB(skb).dst_group = XFRMNLGRP_AEVENTS;
1642 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
/*
 * Broadcast an XFRM_MSG_FLUSHSA notification (protocol only) to the
 * XFRMNLGRP_SA group.  NOTE(review): alloc-failure handling and the
 * declaration of @b are elided in this excerpt.
 */
1645 static int xfrm_notify_sa_flush(struct km_event *c)
1647 struct xfrm_usersa_flush *p;
1648 struct nlmsghdr *nlh;
1649 struct sk_buff *skb;
1651 int len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));
1653 skb = alloc_skb(len, GFP_ATOMIC);
1658 nlh = NLMSG_PUT(skb, c->pid, c->seq,
1659 XFRM_MSG_FLUSHSA, sizeof(*p));
1660 nlh->nlmsg_flags = 0;
1662 p = NLMSG_DATA(nlh);
1663 p->proto = c->data.proto;
1665 nlh->nlmsg_len = skb->tail - b;
1667 NETLINK_CB(skb).dst_group = XFRMNLGRP_SA;
1668 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
/* Compute the rtattr space needed to dump state 'x': auth/crypt algorithms
 * (key length converted from bits to bytes, rounding up), compression
 * algorithm and NAT-T encapsulation template.
 * NOTE(review): the initialization of 'l', the per-attribute presence
 * checks (presumably "if (x->aalg)" etc.) and the return are elided. */
1675 static int inline xfrm_sa_len(struct xfrm_state *x)
1679 l += RTA_SPACE(sizeof(*x->aalg) + (x->aalg->alg_key_len+7)/8);
1681 l += RTA_SPACE(sizeof(*x->ealg) + (x->ealg->alg_key_len+7)/8);
1683 l += RTA_SPACE(sizeof(*x->calg));
1685 l += RTA_SPACE(sizeof(*x->encap));
/* Broadcast an SA add/update/delete (c->event) to XFRMNLGRP_SA listeners.
 * For DELSA the top-level payload is a compact xfrm_usersa_id and the full
 * xfrm_usersa_info rides in a nested XFRMA_SA attribute; for NEW/UPD the
 * info struct is the payload itself.
 * NOTE(review): declaration of 'b', headlen/len locals, alloc NULL check and
 * the nlmsg_failure/rtattr_failure labels are elided in this excerpt. */
1690 static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
1692 struct xfrm_usersa_info *p;
1693 struct xfrm_usersa_id *id;
1694 struct nlmsghdr *nlh;
1695 struct sk_buff *skb;
/* Start from the attribute space the SA's algorithms/encap need. */
1697 int len = xfrm_sa_len(x);
1700 headlen = sizeof(*p);
1701 if (c->event == XFRM_MSG_DELSA) {
/* DELSA: full info becomes an attribute, header shrinks to the id. */
1702 len += RTA_SPACE(headlen);
1703 headlen = sizeof(*id);
1705 len += NLMSG_SPACE(headlen);
1707 skb = alloc_skb(len, GFP_ATOMIC);
1712 nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen);
1713 nlh->nlmsg_flags = 0;
1715 p = NLMSG_DATA(nlh);
1716 if (c->event == XFRM_MSG_DELSA) {
1717 id = NLMSG_DATA(nlh);
1718 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
1719 id->spi = x->id.spi;
1720 id->family = x->props.family;
1721 id->proto = x->id.proto;
/* Repoint 'p' at the nested XFRMA_SA attribute's payload. */
1723 p = RTA_DATA(__RTA_PUT(skb, XFRMA_SA, sizeof(*p)));
1726 copy_to_user_state(x, p);
/* Key lengths are in bits; (+7)/8 rounds up to whole bytes. */
1729 RTA_PUT(skb, XFRMA_ALG_AUTH,
1730 sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg);
1732 RTA_PUT(skb, XFRMA_ALG_CRYPT,
1733 sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg);
1735 RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
1738 RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
1740 nlh->nlmsg_len = skb->tail - b;
1742 NETLINK_CB(skb).dst_group = XFRMNLGRP_SA;
1743 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
/* km_event dispatcher for SA notifications: route each event type to its
 * specific builder/broadcaster.  Unknown events are logged (the default:
 * label and the function's final return are elided in this excerpt). */
1751 static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
1755 case XFRM_MSG_EXPIRE:
1756 return xfrm_exp_state_notify(x, c);
1757 case XFRM_MSG_NEWAE:
1758 return xfrm_aevent_state_notify(x, c);
1759 case XFRM_MSG_DELSA:
1760 case XFRM_MSG_UPDSA:
1761 case XFRM_MSG_NEWSA:
/* Add/update/delete share one builder; it branches on c->event. */
1762 return xfrm_notify_sa(x, c);
1763 case XFRM_MSG_FLUSHSA:
1764 return xfrm_notify_sa_flush(c);
1766 printk("xfrm_user: Unknown SA event %d\n", c->event);
/* Build an XFRM_MSG_ACQUIRE message asking key managers to negotiate an SA
 * matching template 'xt' for policy 'xp'; records the acquire sequence
 * number in both the message and x->km.seq so replies can be matched.
 * NOTE(review): the 'dir' parameter line, NLMSG_PUT size continuation,
 * success return and nlmsg_failure: label are elided in this excerpt. */
1774 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
1775 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
1778 struct xfrm_user_acquire *ua;
1779 struct nlmsghdr *nlh;
/* Saved start of message for failure trimming. */
1780 unsigned char *b = skb->tail;
1781 __u32 seq = xfrm_get_acqseq();
1783 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_ACQUIRE,
1785 ua = NLMSG_DATA(nlh);
1786 nlh->nlmsg_flags = 0;
1788 memcpy(&ua->id, &x->id, sizeof(ua->id));
1789 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
1790 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
1791 copy_to_user_policy(xp, &ua->policy, dir);
1792 ua->aalgos = xt->aalgos;
1793 ua->ealgos = xt->ealgos;
1794 ua->calgos = xt->calgos;
/* Stash the sequence on the state too, so the KM's answer is matchable. */
1795 ua->seq = x->km.seq = seq;
/* Append policy templates and the state's security context as attrs. */
1797 if (copy_to_user_tmpl(xp, skb) < 0)
1799 if (copy_to_user_state_sec_ctx(x, skb))
1802 nlh->nlmsg_len = skb->tail - b;
/* Failure path: undo any partial message. */
1806 skb_trim(skb, b - skb->data);
/* Broadcast an acquire request to XFRMNLGRP_ACQUIRE listeners (userspace
 * key managers).  NOTE(review): the 'len' declaration, alloc NULL check and
 * build-failure cleanup are elided in this excerpt. */
1810 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
1811 struct xfrm_policy *xp, int dir)
1813 struct sk_buff *skb;
/* Size: templates attr + acquire header + security-context attr. */
1816 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
1817 len += NLMSG_SPACE(sizeof(struct xfrm_user_acquire));
1818 len += RTA_SPACE(xfrm_user_sec_ctx_size(xp));
1819 skb = alloc_skb(len, GFP_ATOMIC);
1823 if (build_acquire(skb, x, xt, xp, dir) < 0)
1826 NETLINK_CB(skb).dst_group = XFRMNLGRP_ACQUIRE;
1827 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
1830 /* User gives us xfrm_user_policy_info followed by an array of 0
1831 * or more templates.
/* km_mgr callback: parse a per-socket policy blob (xfrm_userpolicy_info
 * followed by 0..XFRM_MAX_DEPTH templates) set via [IP/IPV6]_XFRM_POLICY
 * sockopt, and return a newly allocated xfrm_policy.
 * NOTE(review): the error returns inside each validation branch, the
 * assignment of *dir, and the final return are elided in this excerpt. */
1833 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
1834 u8 *data, int len, int *dir)
1836 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
/* Templates immediately follow the fixed-size policy header. */
1837 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
1838 struct xfrm_policy *xp;
/* The sockopt must match the socket family. */
1841 switch (sk->sk_family) {
1843 if (opt != IP_XFRM_POLICY) {
1848 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1850 if (opt != IPV6_XFRM_POLICY) {
/* Reject truncated blobs and semantically invalid policy info. */
1863 if (len < sizeof(*p) ||
1864 verify_newpolicy_info(p))
/* Whatever trails the header must be whole templates, bounded in count. */
1867 nr = ((len - sizeof(*p)) / sizeof(*ut));
1868 if (nr > XFRM_MAX_DEPTH)
/* Socket policies may only be IN/OUT (no FWD). */
1871 if (p->dir > XFRM_POLICY_OUT)
1874 xp = xfrm_policy_alloc(GFP_KERNEL);
1880 copy_from_user_policy(xp, p);
1881 copy_templates(xp, ut, nr);
/* Attach an LSM security context if the policy doesn't have one yet. */
1883 if (!xp->security) {
1884 int err = security_xfrm_sock_policy_alloc(xp, sk);
/* Build an XFRM_MSG_POLEXPIRE message for policy 'xp' in direction 'dir';
 * c->data.hard distinguishes hard vs. soft expiry.
 * NOTE(review): the upe->hard assignment, success return and the
 * nlmsg_failure: label are elided; the skb_trim below is the failure path. */
1897 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
1898 int dir, struct km_event *c)
1900 struct xfrm_user_polexpire *upe;
1901 struct nlmsghdr *nlh;
1902 int hard = c->data.hard;
/* Saved start of message for failure trimming. */
1903 unsigned char *b = skb->tail;
1905 nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe));
1906 upe = NLMSG_DATA(nlh);
1907 nlh->nlmsg_flags = 0;
1909 copy_to_user_policy(xp, &upe->pol, dir);
/* Append template and security-context attributes. */
1910 if (copy_to_user_tmpl(xp, skb) < 0)
1912 if (copy_to_user_sec_ctx(xp, skb))
1916 nlh->nlmsg_len = skb->tail - b;
1920 skb_trim(skb, b - skb->data);
/* Broadcast a policy-expire event to XFRMNLGRP_EXPIRE listeners.
 * NOTE(review): 'len' declaration, alloc NULL check and build-failure
 * cleanup are elided in this excerpt. */
1924 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1926 struct sk_buff *skb;
/* Size: templates attr + polexpire header + security-context attr. */
1929 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
1930 len += NLMSG_SPACE(sizeof(struct xfrm_user_polexpire));
1931 len += RTA_SPACE(xfrm_user_sec_ctx_size(xp));
1932 skb = alloc_skb(len, GFP_ATOMIC);
1936 if (build_polexpire(skb, xp, dir, c) < 0)
1939 NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE;
1940 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
/* Broadcast a policy add/update/delete (c->event) to XFRMNLGRP_POLICY
 * listeners.  For DELPOLICY the top-level payload is an xfrm_userpolicy_id
 * (by index or by selector — the selecting branch is partially elided) and
 * the full xfrm_userpolicy_info rides in a nested XFRMA_POLICY attribute.
 * NOTE(review): declaration of 'b', headlen local, alloc NULL check and the
 * nlmsg_failure/rtattr_failure labels are elided in this excerpt. */
1943 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
1945 struct xfrm_userpolicy_info *p;
1946 struct xfrm_userpolicy_id *id;
1947 struct nlmsghdr *nlh;
1948 struct sk_buff *skb;
1950 int len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
1953 headlen = sizeof(*p);
1954 if (c->event == XFRM_MSG_DELPOLICY) {
/* DELPOLICY: full info becomes an attribute, header shrinks to the id. */
1955 len += RTA_SPACE(headlen);
1956 headlen = sizeof(*id);
1958 len += NLMSG_SPACE(headlen);
1960 skb = alloc_skb(len, GFP_ATOMIC);
1965 nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen);
1967 p = NLMSG_DATA(nlh);
1968 if (c->event == XFRM_MSG_DELPOLICY) {
1969 id = NLMSG_DATA(nlh);
1970 memset(id, 0, sizeof(*id));
1973 id->index = xp->index;
1975 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
/* Repoint 'p' at the nested XFRMA_POLICY attribute's payload. */
1977 p = RTA_DATA(__RTA_PUT(skb, XFRMA_POLICY, sizeof(*p)));
1980 nlh->nlmsg_flags = 0;
1982 copy_to_user_policy(xp, p, dir);
1983 if (copy_to_user_tmpl(xp, skb) < 0)
1986 nlh->nlmsg_len = skb->tail - b;
1988 NETLINK_CB(skb).dst_group = XFRMNLGRP_POLICY;
1989 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
/* Broadcast XFRM_MSG_FLUSHPOLICY (zero-length payload) to XFRMNLGRP_POLICY
 * listeners after all policies were flushed.
 * NOTE(review): declaration/assignment of 'b', alloc NULL check and the
 * nlmsg_failure: label are elided in this excerpt. */
1997 static int xfrm_notify_policy_flush(struct km_event *c)
1999 struct nlmsghdr *nlh;
2000 struct sk_buff *skb;
/* Header only — a flush carries no payload. */
2002 int len = NLMSG_LENGTH(0);
2004 skb = alloc_skb(len, GFP_ATOMIC);
2010 nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0);
2012 nlh->nlmsg_len = skb->tail - b;
2014 NETLINK_CB(skb).dst_group = XFRMNLGRP_POLICY;
2015 return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
/* km_event dispatcher for policy notifications: route each event type to
 * its specific builder/broadcaster.  Unknown events are logged (the
 * default: label and final return are elided in this excerpt). */
2022 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2026 case XFRM_MSG_NEWPOLICY:
2027 case XFRM_MSG_UPDPOLICY:
2028 case XFRM_MSG_DELPOLICY:
/* Add/update/delete share one builder; it branches on c->event. */
2029 return xfrm_notify_policy(xp, dir, c);
2030 case XFRM_MSG_FLUSHPOLICY:
2031 return xfrm_notify_policy_flush(c);
2032 case XFRM_MSG_POLEXPIRE:
2033 return xfrm_exp_policy_notify(xp, dir, c);
2035 printk("xfrm_user: Unknown Policy event %d\n", c->event);
/* Key-manager registration: hooks this netlink interface into the xfrm
 * core (see xfrm_register_km in xfrm_user_init).  NOTE(review): the .id
 * member line is elided in this excerpt. */
2042 static struct xfrm_mgr netlink_mgr = {
2044 .notify = xfrm_send_state_notify,
2045 .acquire = xfrm_send_acquire,
2046 .compile_policy = xfrm_compile_policy,
2047 .notify_policy = xfrm_send_policy_notify,
/* Module init: create the NETLINK_XFRM kernel socket, publish it via the
 * xfrm_nl pointer (rcu_assign_pointer so readers see a fully initialized
 * socket) and register this interface as a key manager.
 * NOTE(review): the nlsk declaration, the NULL check after
 * netlink_kernel_create and the return are elided in this excerpt. */
2050 static int __init xfrm_user_init(void)
2054 printk(KERN_INFO "Initializing IPsec netlink socket\n");
2056 nlsk = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX,
2057 xfrm_netlink_rcv, THIS_MODULE);
2060 rcu_assign_pointer(xfrm_nl, nlsk);
2062 xfrm_register_km(&netlink_mgr);
/* Module exit: unregister the key manager, clear the published socket
 * pointer, then release the netlink socket.
 * NOTE(review): no RCU grace-period wait is visible between clearing
 * xfrm_nl and sock_release — an in-flight reader could still hold the old
 * pointer; later kernels add synchronization here, confirm against the
 * full file / upstream history. */
2069 static void __exit xfrm_user_exit(void)
2071 struct sock *nlsk = xfrm_nl;
2073 xfrm_unregister_km(&netlink_mgr);
2074 rcu_assign_pointer(xfrm_nl, NULL);
2076 sock_release(nlsk->sk_socket);
/* Module plumbing: entry/exit hooks, license, and a PF_NETLINK/NETLINK_XFRM
 * alias so the module can be auto-loaded when the socket family is used. */
2077 module_init(xfrm_user_init);
2078 module_exit(xfrm_user_exit);
2079 MODULE_LICENSE("GPL");
2080 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);