[IPSEC]: Add compatibility algorithm name support
net/xfrm/xfrm_algo.c
/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>

/*
 * Algorithms supported by IPsec.  These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
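
/*
 * Note on compatibility names (the subject of this change): struct
 * xfrm_algo_desc also carries a ".compat" member, and xfrm_get_byname()
 * below matches either the primary name or that alias.  Purely as an
 * illustration (the "hmac(md5)" spelling is an assumption made for this
 * sketch, not something taken from the tables in this file), an entry
 * carrying an alias could look like:
 *
 *        {
 *                .name   = "hmac(md5)",  (name handed to the crypto layer)
 *                .compat = "md5",        (legacy name still accepted from userspace)
 *                ...
 *        }
 */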
static struct xfrm_algo_desc aalg_list[] = {
{
        .name = "digest_null",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 0,
                        .icv_fullbits = 0,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_NULL,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 0,
                .sadb_alg_maxbits = 0
        }
},
{
        .name = "md5",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_AALG_MD5HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 128
        }
},
{
        .name = "sha1",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 160,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_AALG_SHA1HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 160,
                .sadb_alg_maxbits = 160
        }
},
{
        .name = "sha256",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 256,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 256,
                .sadb_alg_maxbits = 256
        }
},
{
        .name = "ripemd160",

        .uinfo = {
                .auth = {
                        .icv_truncbits = 96,
                        .icv_fullbits = 160,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 160,
                .sadb_alg_maxbits = 160
        }
},
};

static struct xfrm_algo_desc ealg_list[] = {
{
        .name = "cipher_null",

        .uinfo = {
                .encr = {
                        .blockbits = 8,
                        .defkeybits = 0,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_EALG_NULL,
                .sadb_alg_ivlen = 0,
                .sadb_alg_minbits = 0,
                .sadb_alg_maxbits = 0
        }
},
{
        .name = "des",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 64,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_EALG_DESCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 64,
                .sadb_alg_maxbits = 64
        }
},
{
        .name = "des3_ede",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 192,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_EALG_3DESCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 192,
                .sadb_alg_maxbits = 192
        }
},
{
        .name = "cast128",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_CASTCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 40,
                .sadb_alg_maxbits = 128
        }
},
{
        .name = "blowfish",

        .uinfo = {
                .encr = {
                        .blockbits = 64,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 40,
                .sadb_alg_maxbits = 448
        }
},
{
        .name = "aes",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_AESCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256
        }
},
{
        .name = "serpent",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_SERPENTCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256,
        }
},
{
        .name = "twofish",

        .uinfo = {
                .encr = {
                        .blockbits = 128,
                        .defkeybits = 128,
                }
        },

        .desc = {
                .sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
                .sadb_alg_ivlen = 8,
                .sadb_alg_minbits = 128,
                .sadb_alg_maxbits = 256
        }
},
};

static struct xfrm_algo_desc calg_list[] = {
{
        .name = "deflate",
        .uinfo = {
                .comp = {
                        .threshold = 90,
                }
        },
        .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
        .name = "lzs",
        .uinfo = {
                .comp = {
                        .threshold = 90,
                }
        },
        .desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
        .name = "lzjh",
        .uinfo = {
                .comp = {
                        .threshold = 50,
                }
        },
        .desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};

static inline int aalg_entries(void)
{
        return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
        return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
        return ARRAY_SIZE(calg_list);
}

/* Todo: generic iterators */
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
        int i;

        for (i = 0; i < aalg_entries(); i++) {
                if (aalg_list[i].desc.sadb_alg_id == alg_id) {
                        if (aalg_list[i].available)
                                return &aalg_list[i];
                        else
                                break;
                }
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
        int i;

        for (i = 0; i < ealg_entries(); i++) {
                if (ealg_list[i].desc.sadb_alg_id == alg_id) {
                        if (ealg_list[i].available)
                                return &ealg_list[i];
                        else
                                break;
                }
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
        int i;

        for (i = 0; i < calg_entries(); i++) {
                if (calg_list[i].desc.sadb_alg_id == alg_id) {
                        if (calg_list[i].available)
                                return &calg_list[i];
                        else
                                break;
                }
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);

static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
                                              int entries, char *name,
                                              int probe)
{
        int i, status;

        if (!name)
                return NULL;

        for (i = 0; i < entries; i++) {
                if (strcmp(name, list[i].name) &&
                    (!list[i].compat || strcmp(name, list[i].compat)))
                        continue;

                if (list[i].available)
                        return &list[i];

                if (!probe)
                        break;

                status = crypto_alg_available(name, 0);
                if (!status)
                        break;

                list[i].available = status;
                return &list[i];
        }
        return NULL;
}

struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
        return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
        return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
        return xfrm_get_byname(calg_list, calg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
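
/*
 * Usage sketch (illustration only, not part of this file): a configuration
 * path that receives an algorithm name from userspace resolves it through
 * the *_get_byname() helpers; the compat handling in xfrm_get_byname()
 * means either the primary or the alias spelling is accepted.  The caller
 * and field names below are hypothetical, and probe is non-zero so that a
 * transform that is present but not yet marked available is detected via
 * crypto_alg_available().
 *
 *        struct xfrm_algo_desc *algo;
 *
 *        algo = xfrm_ealg_get_byname(name, 1);
 *        if (!algo)
 *                return -ENOSYS;          (unknown name or missing transform)
 *        x->props.ealgo = algo->desc.sadb_alg_id;
 */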

struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
        if (idx >= aalg_entries())
                return NULL;

        return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
        if (idx >= ealg_entries())
                return NULL;

        return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);

/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called by
 * pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
        int i, status;

        BUG_ON(in_softirq());

        for (i = 0; i < aalg_entries(); i++) {
                status = crypto_alg_available(aalg_list[i].name, 0);
                if (aalg_list[i].available != status)
                        aalg_list[i].available = status;
        }

        for (i = 0; i < ealg_entries(); i++) {
                status = crypto_alg_available(ealg_list[i].name, 0);
                if (ealg_list[i].available != status)
                        ealg_list[i].available = status;
        }

        for (i = 0; i < calg_entries(); i++) {
                status = crypto_alg_available(calg_list[i].name, 0);
                if (calg_list[i].available != status)
                        calg_list[i].available = status;
        }
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);

int xfrm_count_auth_supported(void)
{
        int i, n;

        for (i = 0, n = 0; i < aalg_entries(); i++)
                if (aalg_list[i].available)
                        n++;
        return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
        int i, n;

        for (i = 0, n = 0; i < ealg_entries(); i++)
                if (ealg_list[i].available)
                        n++;
        return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
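
/*
 * Usage sketch (illustration only, not part of this file): the pattern
 * described above xfrm_probe_algs() is roughly how a pfkey-style caller
 * builds its list of supported algorithms: probe first, then size the
 * reply from the counts.  The function and variable names here are
 * hypothetical.
 *
 *        static int example_supported_size(void)
 *        {
 *                xfrm_probe_algs();          (must not be called from softirq)
 *
 *                return sizeof(struct sadb_supported) +
 *                       (xfrm_count_auth_supported() +
 *                        xfrm_count_enc_supported()) * sizeof(struct sadb_alg);
 *        }
 */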

/* This should move to a common area: it is shared with AH. */

void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
                  int offset, int len, icv_update_fn_t icv_update)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct scatterlist sg;

        /* Checksum header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;

                sg.page = virt_to_page(skb->data + offset);
                sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
                sg.length = copy;

                icv_update(tfm, &sg, 1);

                if ((len -= copy) == 0)
                        return;
                offset += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        if (copy > len)
                                copy = len;

                        sg.page = frag->page;
                        sg.offset = frag->page_offset + offset-start;
                        sg.length = copy;

                        icv_update(tfm, &sg, 1);

                        if (!(len -= copy))
                                return;
                        offset += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                skb_icv_walk(list, tfm, offset-start, copy, icv_update);
                                if ((len -= copy) == 0)
                                        return;
                                offset += copy;
                        }
                        start = end;
                }
        }
        BUG_ON(len);
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
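
/*
 * Usage sketch (illustration only, not part of this file): an AH-style
 * caller computes an ICV over the whole packet by passing an
 * icv_update_fn_t to skb_icv_walk().  The helper below is hypothetical
 * and assumes the hmac interface of the crypto API of this era
 * (crypto_hmac_init/update/final); check those signatures before reuse.
 *
 *        static void example_hmac_digest(struct crypto_tfm *tfm, u8 *key,
 *                                        unsigned int keylen,
 *                                        struct sk_buff *skb, u8 *digest)
 *        {
 *                crypto_hmac_init(tfm, key, &keylen);
 *                skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update);
 *                crypto_hmac_final(tfm, key, &keylen, digest);
 *        }
 */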

#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Although it looks generic, it is not used anywhere else. */

int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        int elt = 0;

        if (copy > 0) {
                if (copy > len)
                        copy = len;
                sg[elt].page = virt_to_page(skb->data + offset);
                sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
                sg[elt].length = copy;
                elt++;
                if ((len -= copy) == 0)
                        return elt;
                offset += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        if (copy > len)
                                copy = len;
                        sg[elt].page = frag->page;
                        sg[elt].offset = frag->page_offset+offset-start;
                        sg[elt].length = copy;
                        elt++;
                        if (!(len -= copy))
                                return elt;
                        offset += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
                                if ((len -= copy) == 0)
                                        return elt;
                                offset += copy;
                        }
                        start = end;
                }
        }
        BUG_ON(len);
        return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);

/* Check that the skb data bits are writable.  If they are not, copy the
 * data to a newly created private area.  If "tailbits" is given, make sure
 * that tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements to load for subsequent
 * transformations, and a pointer to the writable trailer skb.
 */

int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
        int copyflag;
        int elt;
        struct sk_buff *skb1, **skb_p;

        /* If the skb is cloned or its head is paged, reallocate the head,
         * pulling out all the pages (pages are considered not writable
         * at the moment even if they are anonymous).
         */
        if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
            __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
                return -ENOMEM;

        /* Easy case. Most packets will go this way. */
        if (!skb_shinfo(skb)->frag_list) {
                /* A little trouble: not enough space for the trailer.
                 * This should not happen when the stack is tuned to generate
                 * good frames. On a miss we reallocate and reserve even more
                 * space; 128 extra bytes is fair. */

                if (skb_tailroom(skb) < tailbits &&
                    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
                        return -ENOMEM;

                /* Voila! */
                *trailer = skb;
                return 1;
        }

        /* The hard case: walk the fragment list and copy any fragment
         * that cannot be modified in place. */

        elt = 1;
        skb_p = &skb_shinfo(skb)->frag_list;
        copyflag = 0;

        while ((skb1 = *skb_p) != NULL) {
                int ntail = 0;

                /* The fragment is partially pulled by someone; this can
                 * happen on input. Copy it and everything after it. */

                if (skb_shared(skb1))
                        copyflag = 1;

                /* If this skb is the last one, worry about the trailer. */

                if (skb1->next == NULL && tailbits) {
                        if (skb_shinfo(skb1)->nr_frags ||
                            skb_shinfo(skb1)->frag_list ||
                            skb_tailroom(skb1) < tailbits)
                                ntail = tailbits + 128;
                }

                if (copyflag ||
                    skb_cloned(skb1) ||
                    ntail ||
                    skb_shinfo(skb1)->nr_frags ||
                    skb_shinfo(skb1)->frag_list) {
                        struct sk_buff *skb2;

                        /* No way around it: this fragment must be copied. */
                        if (ntail == 0)
                                skb2 = skb_copy(skb1, GFP_ATOMIC);
                        else
                                skb2 = skb_copy_expand(skb1,
                                                       skb_headroom(skb1),
                                                       ntail,
                                                       GFP_ATOMIC);
                        if (unlikely(skb2 == NULL))
                                return -ENOMEM;

                        if (skb1->sk)
                                skb_set_owner_w(skb2, skb1->sk);

                        /* Link in the new skb and drop the old one. */

                        skb2->next = skb1->next;
                        *skb_p = skb2;
                        kfree_skb(skb1);
                        skb1 = skb2;
                }
                elt++;
                *trailer = skb1;
                skb_p = &skb1->next;
        }

        return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);

void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
        if (tail != skb) {
                skb->data_len += len;
                skb->len += len;
        }
        return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
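
/*
 * Usage sketch (illustration only, not part of this file): an ESP-style
 * output path uses the three helpers above together: make the packet data
 * writable and learn how many scatterlist entries are needed, append the
 * trailer to the writable tail, then build the scatterlist handed to the
 * cipher.  All names below are hypothetical and details such as sizing
 * the scatterlist array are omitted.
 *
 *        struct sk_buff *trailer;
 *        int nfrags;
 *        u8 *tail;
 *
 *        nfrags = skb_cow_data(skb, trailer_len, &trailer);
 *        if (nfrags < 0)
 *                goto error;
 *
 *        tail = pskb_put(skb, trailer, trailer_len);
 *        (write the padding and trailer bytes at "tail")
 *
 *        skb_to_sgvec(skb, sg, payload_offset, payload_len);
 *        (pass "sg" with "nfrags" entries to the crypto transform)
 */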
#endif