#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

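/*
 * Output path: pad the payload up to the cipher block size, append the
 * ESP trailer (pad length + next header), encrypt in place through a
 * scatterlist over the skb, then append the truncated ICV. The xfrm
 * core has already reserved header_len/trailer_len worth of skb room.
 */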
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *top_iph;
	struct ip_esp_hdr *esph;
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct esp_data *esp;
	struct sk_buff *trailer;
	struct scatterlist *sg;
	u8 *tail;
	int err, blksize, clen, alen, nfrags, i;
	/* Strip IP+ESP header. */
	__skb_pull(skb, skb_transport_offset(skb));
	/* Now skb is pure payload to encrypt */

	err = -ENOMEM;

	/* Round the payload up to the cipher block size. */
	clen = skb->len;
	esp = x->data;
	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	desc.tfm = tfm;
	desc.flags = 0;
	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->conf.padlen)
		clen = ALIGN(clen, esp->conf.padlen);

	if ((nfrags = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
		goto error;
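
	/*
	 * RFC 2406 trailer layout: self-describing padding (1, 2, 3, ...),
	 * then one pad-length byte and one next-header byte. The monotonic
	 * pad bytes allow the receiver to verify the padding if it wants.
	 */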
	tail = skb_tail_pointer(trailer);
	for (i = 0; i < clen - skb->len - 2; i++)
		tail[i] = i + 1;
	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
	pskb_put(skb, trailer, clen - skb->len);

	__skb_push(skb, -skb_network_offset(skb));
	top_iph = ip_hdr(skb);
	esph = (struct ip_esp_hdr *)(skb_network_header(skb) +
				     top_iph->ihl * 4);
	top_iph->tot_len = htons(skb->len + alen);
	*(skb_tail_pointer(trailer) - 1) = top_iph->protocol;

	spin_lock_bh(&x->lock);
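
	/*
	 * For NAT-T a UDP header goes in front of the ESP header.
	 * UDP_ENCAP_ESPINUDP (RFC 3948) places ESP right after the UDP
	 * header; the older NON_IKE draft variant inserts 8 zero bytes
	 * (a non-ESP marker) in between. The UDP checksum stays 0.
	 */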
	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;

		uh = (struct udphdr *)esph;
		uh->source = encap->encap_sport;
		uh->dest = encap->encap_dport;
		uh->len = htons(skb->len + alen - top_iph->ihl * 4);
		uh->check = 0;

		switch (encap->encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		top_iph->protocol = IPPROTO_UDP;
	} else
		top_iph->protocol = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
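
	/*
	 * The IV buffer is seeded randomly once at first use and then
	 * chained: after each encryption the transform's current IV (for
	 * CBC ciphers, the last ciphertext block) is read back and reused
	 * as the next packet's IV.
	 */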
	if (esp->conf.ivlen) {
		if (unlikely(!esp->conf.ivinitted)) {
			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
			esp->conf.ivinitted = 1;
		}
		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}
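
	/*
	 * Encrypt in place. A small preallocated per-SA scatterlist
	 * (esp->sgbuf) covers the common case; only heavily fragmented
	 * skbs force a GFP_ATOMIC allocation here.
	 */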
	sg = &esp->sgbuf[0];
	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
		sg = kmalloc(sizeof(struct scatterlist) * nfrags, GFP_ATOMIC);
		if (!sg)
			goto unlock;
	}
	skb_to_sgvec(skb, sg, esph->enc_data + esp->conf.ivlen - skb->data, clen);
	err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
	if (unlikely(sg != &esp->sgbuf[0]))
		kfree(sg);

	if (unlikely(err))
		goto unlock;
	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	if (esp->auth.icv_full_len) {
		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
				     sizeof(*esph) + esp->conf.ivlen + clen);
		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
	}

unlock:
	spin_unlock_bh(&x->lock);

	ip_send_check(top_iph);

error:
	return err;
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_blkcipher *tfm = esp->conf.tfm;
	struct blkcipher_desc desc = { .tfm = tfm };
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
	int nfrags, ihl, padlen, err;
	u8 nexthdr[2];
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
		goto out;

	if (elen <= 0 || (elen & (blksize - 1)))
		goto out;
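
	/*
	 * The ICV is verified over the ESP header, IV and ciphertext
	 * before anything is decrypted, so forged packets are rejected
	 * without ever touching the cipher.
	 */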
	/* If integrity check is required, do this. */
	if (esp->auth.icv_full_len) {
		u8 sum[alen];

		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
		if (err)
			goto out;

		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
			BUG();

		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
			x->stats.integrity_failed++;
			goto out;
		}
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;
	/* Get ivec. This can be wrong; check against other implementations. */
	if (esp->conf.ivlen)
		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
	sg = &esp->sgbuf[0];
	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
		sg = kmalloc(sizeof(struct scatterlist) * nfrags, GFP_ATOMIC);
		if (!sg)
			goto out;
	}
	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
	if (unlikely(sg != &esp->sgbuf[0]))
		kfree(sg);

	if (unlikely(err))
		return err;
	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	padlen = nexthdr[0];
	if (padlen + 2 >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */
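	/*
	 * RFC 2406 leaves pad-byte verification optional for the
	 * receiver, so skipping the check here is still conformant.
	 */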

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	iph->protocol = nexthdr[1];
	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
	skb_set_transport_header(skb, -ihl);

	return 0;

out:
	return -EINVAL;
}
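
/*
 * Report the largest payload that still fits in @mtu once the ESP
 * header, IV, padding, trailer byte pair and truncated ICV are all
 * accounted for; the rounding mirrors the padding logic in esp_output().
 */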
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	u32 align = max_t(u32, blksize, esp->conf.padlen);
	u32 rem;

	mtu -= x->props.header_len + esp->auth.icv_trunc_len;
	rem = mtu & (align - 1);
	mtu &= ~(align - 1);

	switch (x->props.mode) {
	case XFRM_MODE_TUNNEL:
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		/* The worst case */
		mtu -= blksize - 4;
		mtu += min_t(u32, blksize - 4, rem);
		break;
	case XFRM_MODE_BEET:
		/* The worst case. */
		mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
		break;
	}

	return mtu - 2;
}
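
/*
 * ICMP error handler: only PMTU discovery (fragmentation needed) is
 * interesting here. The SPI quoted in the ICMP payload identifies the
 * SA that the error refers to.
 */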
static void esp4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
		return;

	x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi,
			      IPPROTO_ESP, AF_INET);
	if (!x)
		return;
	NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
		 ntohl(esph->spi), ntohl(iph->daddr));
	xfrm_state_put(x);
}

static void esp_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_blkcipher(esp->conf.tfm);
	esp->conf.tfm = NULL;
	kfree(esp->conf.ivec);
	esp->conf.ivec = NULL;
	crypto_free_hash(esp->auth.tfm);
	esp->auth.tfm = NULL;
	kfree(esp->auth.work_icv);
	esp->auth.work_icv = NULL;
	kfree(esp);
}
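
/*
 * Set up a freshly negotiated SA: allocate and key the hash and block
 * cipher transforms, then precompute header_len/trailer_len so the
 * output path can reserve skb room without redoing this per packet.
 */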
static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;
	u32 align;

	if (x->ealg == NULL)
		goto error;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, x->aalg->alg_key,
				       (x->aalg->alg_key_len + 7) / 8))
			goto error;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits / 8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits / 8;

		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}
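
	/*
	 * Note on the ICV lengths set up above: HMAC-SHA-1, for example,
	 * yields a 160-bit digest, but RFC 2404 transmits only the first
	 * 96 bits. work_icv holds the full digest; only icv_trunc_len
	 * bytes of it go on the wire.
	 */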
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key,
				    (x->ealg->alg_key_len + 7) / 8))
		goto error;
	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}
	x->data = esp;
	align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	if (esp->conf.padlen)
		align = max_t(u32, align, esp->conf.padlen);
	x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
	return 0;

error:
	x->data = esp;
	esp_destroy(x);
	x->data = NULL;
	return -EINVAL;
}

static struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output
};

static struct net_protocol esp4_protocol = {
	.handler	=	xfrm4_rcv,
	.err_handler	=	esp4_err,
	.no_policy	=	1,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		printk(KERN_INFO "ip esp init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
		printk(KERN_INFO "ip esp init: can't add protocol\n");
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
		printk(KERN_INFO "ip esp close: can't remove protocol\n");
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);