[CRYPTO] padlock-sha: TFMs don't need to be static
drivers/crypto/padlock-sha.c
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include "padlock.h"

#define SHA1_DEFAULT_FALLBACK   "sha1-generic"
#define SHA1_DIGEST_SIZE        20
#define SHA1_HMAC_BLOCK_SIZE    64

#define SHA256_DEFAULT_FALLBACK "sha256-generic"
#define SHA256_DIGEST_SIZE      32
#define SHA256_HMAC_BLOCK_SIZE  64

static char *sha1_fallback = SHA1_DEFAULT_FALLBACK;
static char *sha256_fallback = SHA256_DEFAULT_FALLBACK;

module_param(sha1_fallback, charp, 0644);
module_param(sha256_fallback, charp, 0644);

MODULE_PARM_DESC(sha1_fallback, "Fallback driver for SHA1. Default is "
                 SHA1_DEFAULT_FALLBACK);
MODULE_PARM_DESC(sha256_fallback, "Fallback driver for SHA256. Default is "
                 SHA256_DEFAULT_FALLBACK);

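/*
 * Per-TFM context: up to one page of buffered input ("data" plus the
 * "used" byte count), a flag noting whether we have fallen back to
 * software ("bypass"), the hardware primitive to run at final time,
 * and the allocated fallback TFM itself.
 */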
struct padlock_sha_ctx {
        char            *data;
        size_t          used;
        int             bypass;
        void (*f_sha_padlock)(const char *in, char *out, int count);
        struct crypto_tfm *fallback_tfm;
};

static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
{
        return (struct padlock_sha_ctx *)(crypto_tfm_ctx(tfm));
}

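/* The xsha instructions want their state/result buffer aligned to
 * PADLOCK_ALIGNMENT (16 bytes); a plain stack array guarantees no such
 * thing, so callers over-allocate and round the pointer up. */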
/* We'll need aligned address on the stack */
#define NEAREST_ALIGNED(ptr) \
        ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))

static struct crypto_alg sha1_alg, sha256_alg;

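/*
 * Switch this TFM over to the software fallback.  The PadLock path
 * hashes the whole message in one shot from a one-page buffer, so once
 * the input outgrows that buffer we initialize the fallback digest and
 * replay whatever has been collected so far.
 */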
static void padlock_sha_bypass(struct crypto_tfm *tfm)
{
        if (ctx(tfm)->bypass)
                return;

        BUG_ON(!ctx(tfm)->fallback_tfm);

        crypto_digest_init(ctx(tfm)->fallback_tfm);
        if (ctx(tfm)->data && ctx(tfm)->used) {
                struct scatterlist sg;

                sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used);
                crypto_digest_update(ctx(tfm)->fallback_tfm, &sg, 1);
        }

        ctx(tfm)->used = 0;
        ctx(tfm)->bypass = 1;
}

static void padlock_sha_init(struct crypto_tfm *tfm)
{
        ctx(tfm)->used = 0;
        ctx(tfm)->bypass = 0;
}

static void padlock_sha_update(struct crypto_tfm *tfm,
                        const uint8_t *data, unsigned int length)
{
        /* Our buffer is always one page. */
        if (unlikely(!ctx(tfm)->bypass &&
                     (ctx(tfm)->used + length > PAGE_SIZE)))
                padlock_sha_bypass(tfm);

        if (unlikely(ctx(tfm)->bypass)) {
                struct scatterlist sg;
                BUG_ON(!ctx(tfm)->fallback_tfm);
                sg_set_buf(&sg, (uint8_t *)data, length);
                crypto_digest_update(ctx(tfm)->fallback_tfm, &sg, 1);
                return;
        }

        memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
        ctx(tfm)->used += length;
}

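/* The engine leaves the hash state as little-endian 32-bit words, while
 * SHA-1/SHA-256 digests are defined big-endian; swap each word on the
 * way out. */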
static inline void padlock_output_block(uint32_t *src,
                        uint32_t *dst, size_t count)
{
        while (count--)
                *dst++ = swab32(*src++);
}

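/*
 * The .byte sequence below encodes "rep xsha1", which contemporary
 * assemblers do not know by name: ESI = input buffer, EDI = initial
 * state / result buffer, ECX = message length in bytes, and EAX = 0
 * requests a complete hash, with the final padding applied by the
 * microcode.  The initial state words are the standard SHA-1 values
 * from FIPS 180-2.
 */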
static void padlock_do_sha1(const char *in, char *out, int count)
{
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         *     PadLock microcode needs it that big. */
        char buf[128+16];
        char *result = NEAREST_ALIGNED(buf);

        ((uint32_t *)result)[0] = 0x67452301;
        ((uint32_t *)result)[1] = 0xEFCDAB89;
        ((uint32_t *)result)[2] = 0x98BADCFE;
        ((uint32_t *)result)[3] = 0x10325476;
        ((uint32_t *)result)[4] = 0xC3D2E1F0;

        asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
                      : "+S"(in), "+D"(result)
                      : "c"(count), "a"(0));

        padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
}

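/* Same register contract as padlock_do_sha1, with the "rep xsha256"
 * opcode and the SHA-256 initial state from FIPS 180-2. */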
static void padlock_do_sha256(const char *in, char *out, int count)
{
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         *     PadLock microcode needs it that big. */
        char buf[128+16];
        char *result = NEAREST_ALIGNED(buf);

        ((uint32_t *)result)[0] = 0x6A09E667;
        ((uint32_t *)result)[1] = 0xBB67AE85;
        ((uint32_t *)result)[2] = 0x3C6EF372;
        ((uint32_t *)result)[3] = 0xA54FF53A;
        ((uint32_t *)result)[4] = 0x510E527F;
        ((uint32_t *)result)[5] = 0x9B05688C;
        ((uint32_t *)result)[6] = 0x1F83D9AB;
        ((uint32_t *)result)[7] = 0x5BE0CD19;

        asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
                      : "+S"(in), "+D"(result)
                      : "c"(count), "a"(0));

        padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
}

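/* On the hardware path, update() only buffers; the actual hashing of
 * the whole message happens here, in a single microcode invocation. */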
static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
{
        if (unlikely(ctx(tfm)->bypass)) {
                BUG_ON(!ctx(tfm)->fallback_tfm);
                crypto_digest_final(ctx(tfm)->fallback_tfm, out);
                ctx(tfm)->bypass = 0;
                return;
        }

        /* Pass the input buffer to PadLock microcode... */
        ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);

        ctx(tfm)->used = 0;
}

static int padlock_cra_init(struct crypto_tfm *tfm, const char *fallback_driver_name)
{
        /* For now we'll allocate one page. This
         * could eventually be configurable one day. */
        ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
        if (!ctx(tfm)->data)
                return -ENOMEM;

        /* Allocate a fallback and abort if it failed. */
        ctx(tfm)->fallback_tfm = crypto_alloc_tfm(fallback_driver_name, 0);
        if (!ctx(tfm)->fallback_tfm) {
                printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
                       fallback_driver_name);
                free_page((unsigned long)(ctx(tfm)->data));
                return -ENOENT;
        }

        return 0;
}

static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
{
        ctx(tfm)->f_sha_padlock = padlock_do_sha1;

        return padlock_cra_init(tfm, sha1_fallback);
}

static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
{
        ctx(tfm)->f_sha_padlock = padlock_do_sha256;

        return padlock_cra_init(tfm, sha256_fallback);
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
        if (ctx(tfm)->data) {
                free_page((unsigned long)(ctx(tfm)->data));
                ctx(tfm)->data = NULL;
        }

        BUG_ON(!ctx(tfm)->fallback_tfm);
        crypto_free_tfm(ctx(tfm)->fallback_tfm);
        ctx(tfm)->fallback_tfm = NULL;
}

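/* Register as plain digest algorithms.  PADLOCK_CRA_PRIORITY (from
 * padlock.h) is meant to outrank the generic C implementations, so the
 * crypto core prefers these TFMs whenever "sha1" or "sha256" is
 * requested by name. */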
static struct crypto_alg sha1_alg = {
        .cra_name               =       "sha1",
        .cra_driver_name        =       "sha1-padlock",
        .cra_priority           =       PADLOCK_CRA_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_DIGEST,
        .cra_blocksize          =       SHA1_HMAC_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct padlock_sha_ctx),
        .cra_module             =       THIS_MODULE,
        .cra_list               =       LIST_HEAD_INIT(sha1_alg.cra_list),
        .cra_init               =       padlock_sha1_cra_init,
        .cra_exit               =       padlock_cra_exit,
        .cra_u                  =       {
                .digest = {
                        .dia_digestsize =       SHA1_DIGEST_SIZE,
                        .dia_init       =       padlock_sha_init,
                        .dia_update     =       padlock_sha_update,
                        .dia_final      =       padlock_sha_final,
                }
        }
};

static struct crypto_alg sha256_alg = {
        .cra_name               =       "sha256",
        .cra_driver_name        =       "sha256-padlock",
        .cra_priority           =       PADLOCK_CRA_PRIORITY,
        .cra_flags              =       CRYPTO_ALG_TYPE_DIGEST,
        .cra_blocksize          =       SHA256_HMAC_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct padlock_sha_ctx),
        .cra_module             =       THIS_MODULE,
        .cra_list               =       LIST_HEAD_INIT(sha256_alg.cra_list),
        .cra_init               =       padlock_sha256_cra_init,
        .cra_exit               =       padlock_cra_exit,
        .cra_u                  =       {
                .digest = {
                        .dia_digestsize =       SHA256_DIGEST_SIZE,
                        .dia_init       =       padlock_sha_init,
                        .dia_update     =       padlock_sha_update,
                        .dia_final      =       padlock_sha_final,
                }
        }
};

static void __init padlock_sha_check_fallbacks(void)
{
        struct crypto_tfm *tfm;

        /* We'll try to allocate one TFM for each fallback
         * to test that the modules are available. */
        tfm = crypto_alloc_tfm(sha1_fallback, 0);
        if (!tfm) {
                printk(KERN_WARNING PFX "Couldn't load fallback module for '%s'. Tried '%s'.\n",
                       sha1_alg.cra_name, sha1_fallback);
        } else {
                printk(KERN_NOTICE PFX "Fallback for '%s' is driver '%s' (prio=%d)\n", sha1_alg.cra_name,
                       crypto_tfm_alg_driver_name(tfm), crypto_tfm_alg_priority(tfm));
                crypto_free_tfm(tfm);
        }

        tfm = crypto_alloc_tfm(sha256_fallback, 0);
        if (!tfm) {
                printk(KERN_WARNING PFX "Couldn't load fallback module for '%s'. Tried '%s'.\n",
                       sha256_alg.cra_name, sha256_fallback);
        } else {
                printk(KERN_NOTICE PFX "Fallback for '%s' is driver '%s' (prio=%d)\n", sha256_alg.cra_name,
                       crypto_tfm_alg_driver_name(tfm), crypto_tfm_alg_priority(tfm));
                crypto_free_tfm(tfm);
        }
}

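/* cpu_has_phe / cpu_has_phe_enabled test the CPUID feature flags that
 * advertise the PadLock Hash Engine (PHE) and report whether it has
 * actually been enabled. */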
static int __init padlock_init(void)
{
        int rc = -ENODEV;

        if (!cpu_has_phe) {
                printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
                return -ENODEV;
        }

        if (!cpu_has_phe_enabled) {
                printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                return -ENODEV;
        }

        padlock_sha_check_fallbacks();

        rc = crypto_register_alg(&sha1_alg);
        if (rc)
                goto out;

        rc = crypto_register_alg(&sha256_alg);
        if (rc)
                goto out_unreg1;

        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

        return 0;

out_unreg1:
        crypto_unregister_alg(&sha1_alg);
out:
        printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
        return rc;
}

static void __exit padlock_fini(void)
{
        crypto_unregister_alg(&sha1_alg);
        crypto_unregister_alg(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");