crypto: talitos - Preempt overflow interrupts off-by-one fix
drivers/crypto/talitos.c
1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_platform.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/io.h>
38 #include <linux/spinlock.h>
39 #include <linux/rtnetlink.h>
40
41 #include <crypto/algapi.h>
42 #include <crypto/aes.h>
43 #include <crypto/des.h>
44 #include <crypto/sha.h>
45 #include <crypto/aead.h>
46 #include <crypto/authenc.h>
47
48 #include "talitos.h"
49
50 #define TALITOS_TIMEOUT 100000
51 #define TALITOS_MAX_DATA_LEN 65535
52
53 #define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
54 #define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
55 #define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
56
57 /* descriptor pointer entry */
58 struct talitos_ptr {
59         __be16 len;     /* length */
60         u8 j_extent;    /* jump to sg link table and/or extent */
61         u8 eptr;        /* extended address */
62         __be32 ptr;     /* address */
63 };
64
65 /* descriptor */
66 struct talitos_desc {
67         __be32 hdr;                     /* header high bits */
68         __be32 hdr_lo;                  /* header low bits */
69         struct talitos_ptr ptr[7];      /* ptr/len pair array */
70 };
71
72 /**
73  * talitos_request - descriptor submission request
74  * @desc: descriptor pointer (kernel virtual)
75  * @dma_desc: descriptor's physical bus address
76  * @callback: whom to call when descriptor processing is done
77  * @context: caller context (optional)
78  */
79 struct talitos_request {
80         struct talitos_desc *desc;
81         dma_addr_t dma_desc;
82         void (*callback) (struct device *dev, struct talitos_desc *desc,
83                           void *context, int error);
84         void *context;
85 };
86
87 struct talitos_private {
88         struct device *dev;
89         struct of_device *ofdev;
90         void __iomem *reg;
91         int irq;
92
93         /* SEC version geometry (from device tree node) */
94         unsigned int num_channels;
95         unsigned int chfifo_len;
96         unsigned int exec_units;
97         unsigned int desc_types;
98
99         /* SEC Compatibility info */
100         unsigned long features;
101
102         /* next channel to be assigned the next incoming descriptor */
103         atomic_t last_chan;
104
105         /* per-channel number of requests pending in channel h/w fifo */
106         atomic_t *submit_count;
107
108         /* per-channel request fifo */
109         struct talitos_request **fifo;
110
111         /*
112          * length of the request fifo
113          * fifo_len is chfifo_len rounded up to next power of 2
114          * so we can use bitwise ops to wrap
115          */
116         unsigned int fifo_len;
117
118         /* per-channel index to next free descriptor request */
119         int *head;
120
121         /* per-channel index to next in-progress/done descriptor request */
122         int *tail;
123
124         /* per-channel request submission (head) and release (tail) locks */
125         spinlock_t *head_lock;
126         spinlock_t *tail_lock;
127
128         /* request callback tasklet */
129         struct tasklet_struct done_task;
130
131         /* list of registered algorithms */
132         struct list_head alg_list;
133
134         /* hwrng device */
135         struct hwrng rng;
136 };
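/*
 * Editor's note: because fifo_len is a power of two, the per-channel head
 * and tail indices above wrap with a simple mask, (index + 1) & (fifo_len - 1);
 * a hypothetical chfifo_len of 24, for example, would be rounded up to a
 * fifo_len of 32, giving a mask of 0x1f.  last_chan is masked the same way
 * against num_channels when talitos_submit() round-robins across channels.
 */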
137
138 /* .features flag */
139 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
140 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
141
142 /*
143  * map virtual single (contiguous) pointer to h/w descriptor pointer
144  */
145 static void map_single_talitos_ptr(struct device *dev,
146                                    struct talitos_ptr *talitos_ptr,
147                                    unsigned short len, void *data,
148                                    unsigned char extent,
149                                    enum dma_data_direction dir)
150 {
151         talitos_ptr->len = cpu_to_be16(len);
152         talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
153         talitos_ptr->j_extent = extent;
154 }
155
156 /*
157  * unmap bus single (contiguous) h/w descriptor pointer
158  */
159 static void unmap_single_talitos_ptr(struct device *dev,
160                                      struct talitos_ptr *talitos_ptr,
161                                      enum dma_data_direction dir)
162 {
163         dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
164                          be16_to_cpu(talitos_ptr->len), dir);
165 }
166
167 static int reset_channel(struct device *dev, int ch)
168 {
169         struct talitos_private *priv = dev_get_drvdata(dev);
170         unsigned int timeout = TALITOS_TIMEOUT;
171
172         setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
173
174         while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
175                && --timeout)
176                 cpu_relax();
177
178         if (timeout == 0) {
179                 dev_err(dev, "failed to reset channel %d\n", ch);
180                 return -EIO;
181         }
182
183         /* set done writeback and IRQ */
184         setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
185                   TALITOS_CCCR_LO_CDIE);
186
187         /* and ICCR writeback, if available */
188         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
189                 setbits32(priv->reg + TALITOS_CCCR_LO(ch),
190                           TALITOS_CCCR_LO_IWSE);
191
192         return 0;
193 }
194
195 static int reset_device(struct device *dev)
196 {
197         struct talitos_private *priv = dev_get_drvdata(dev);
198         unsigned int timeout = TALITOS_TIMEOUT;
199
200         setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
201
202         while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
203                && --timeout)
204                 cpu_relax();
205
206         if (timeout == 0) {
207                 dev_err(dev, "failed to reset device\n");
208                 return -EIO;
209         }
210
211         return 0;
212 }
213
214 /*
215  * Reset and initialize the device
216  */
217 static int init_device(struct device *dev)
218 {
219         struct talitos_private *priv = dev_get_drvdata(dev);
220         int ch, err;
221
222         /*
223          * Master reset
224          * per the errata documentation, certain SEC interrupts are not
225          * fully cleared by a single write of the MCR:SWR bit, so set the
226          * bit twice for a complete reset
227          */
228         err = reset_device(dev);
229         if (err)
230                 return err;
231
232         err = reset_device(dev);
233         if (err)
234                 return err;
235
236         /* reset channels */
237         for (ch = 0; ch < priv->num_channels; ch++) {
238                 err = reset_channel(dev, ch);
239                 if (err)
240                         return err;
241         }
242
243         /* enable channel done and error interrupts */
244         setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
245         setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
246
247         /* disable integrity check error interrupts (use writeback instead) */
248         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
249                 setbits32(priv->reg + TALITOS_MDEUICR_LO,
250                           TALITOS_MDEUICR_LO_ICE);
251
252         return 0;
253 }
254
255 /**
256  * talitos_submit - submits a descriptor to the device for processing
257  * @dev:        the SEC device to be used
258  * @desc:       the descriptor to be processed by the device
259  * @callback:   whom to call when processing is complete
260  * @context:    a handle for use by caller (optional)
261  *
262  * desc must contain valid dma-mapped (bus physical) address pointers.
263  * callback must check err and feedback in descriptor header
264  * for device processing status.
265  */
266 static int talitos_submit(struct device *dev, struct talitos_desc *desc,
267                           void (*callback)(struct device *dev,
268                                            struct talitos_desc *desc,
269                                            void *context, int error),
270                           void *context)
271 {
272         struct talitos_private *priv = dev_get_drvdata(dev);
273         struct talitos_request *request;
274         unsigned long flags, ch;
275         int head;
276
277         /* select done notification */
278         desc->hdr |= DESC_HDR_DONE_NOTIFY;
279
280         /* emulate SEC's round-robin channel fifo polling scheme */
281         ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
282
283         spin_lock_irqsave(&priv->head_lock[ch], flags);
284
285         if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
286                 /* h/w fifo is full */
287                 spin_unlock_irqrestore(&priv->head_lock[ch], flags);
288                 return -EAGAIN;
289         }
290
291         head = priv->head[ch];
292         request = &priv->fifo[ch][head];
293
294         /* map descriptor and save caller data */
295         request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
296                                            DMA_BIDIRECTIONAL);
297         request->callback = callback;
298         request->context = context;
299
300         /* increment fifo head */
301         priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);
302
303         smp_wmb();
304         request->desc = desc;
305
306         /* GO! */
307         wmb();
308         out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);
309
310         spin_unlock_irqrestore(&priv->head_lock[ch], flags);
311
312         return -EINPROGRESS;
313 }
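/*
 * Editor's note: a minimal usage sketch based on how ipsec_esp() below
 * drives this API -- descriptor pointers are dma-mapped first, then the
 * descriptor is handed off, and any return other than -EINPROGRESS means
 * the caller must unmap and free its own state:
 *
 *	ret = talitos_submit(dev, desc, callback, areq);
 *	if (ret != -EINPROGRESS) {
 *		ipsec_esp_unmap(dev, edesc, areq);
 *		kfree(edesc);
 *	}
 */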
314
315 /*
316  * process completed descriptors; those not done get the channel error, if any, reported to their callback
317  */
318 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
319 {
320         struct talitos_private *priv = dev_get_drvdata(dev);
321         struct talitos_request *request, saved_req;
322         unsigned long flags;
323         int tail, status;
324
325         spin_lock_irqsave(&priv->tail_lock[ch], flags);
326
327         tail = priv->tail[ch];
328         while (priv->fifo[ch][tail].desc) {
329                 request = &priv->fifo[ch][tail];
330
331                 /* descriptors with their done bits set don't get the error */
332                 rmb();
333                 if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE) {
334                         status = 0;
335                         /* Ack each pkt completed on channel */
336                         out_be32(priv->reg + TALITOS_ICR, (1 << (ch * 2)));
337                 } else
338                         if (!error)
339                                 break;
340                         else
341                                 status = error;
342
343                 dma_unmap_single(dev, request->dma_desc,
344                         sizeof(struct talitos_desc), DMA_BIDIRECTIONAL);
345
346                 /* copy entries so we can call callback outside lock */
347                 saved_req.desc = request->desc;
348                 saved_req.callback = request->callback;
349                 saved_req.context = request->context;
350
351                 /* release request entry in fifo */
352                 smp_wmb();
353                 request->desc = NULL;
354
355                 /* increment fifo tail */
356                 priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
357
358                 spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
359
360                 atomic_dec(&priv->submit_count[ch]);
361
362                 saved_req.callback(dev, saved_req.desc, saved_req.context,
363                                    status);
364                 /* channel may resume processing in single desc error case */
365                 if (error && !reset_ch && status == error)
366                         return;
367                 spin_lock_irqsave(&priv->tail_lock[ch], flags);
368                 tail = priv->tail[ch];
369         }
370
371         spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
372 }
373
374 /*
375  * process completed requests for channels that have done status
376  */
377 static void talitos_done(unsigned long data)
378 {
379         struct device *dev = (struct device *)data;
380         struct talitos_private *priv = dev_get_drvdata(dev);
381         int ch;
382
383         for (ch = 0; ch < priv->num_channels; ch++)
384                 flush_channel(dev, ch, 0, 0);
385
386         /* At this point, all completed channels have been processed.
387          * Unmask done interrupts for channels completed later on.
388          */
389         setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
390         setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
391 }
392
393 /*
394  * locate current (offending) descriptor
395  */
396 static struct talitos_desc *current_desc(struct device *dev, int ch)
397 {
398         struct talitos_private *priv = dev_get_drvdata(dev);
399         int tail = priv->tail[ch];
400         dma_addr_t cur_desc;
401
402         cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
403
404         while (priv->fifo[ch][tail].dma_desc != cur_desc) {
405                 tail = (tail + 1) & (priv->fifo_len - 1);
406                 if (tail == priv->tail[ch]) {
407                         dev_err(dev, "couldn't locate current descriptor\n");
408                         return NULL;
409                 }
410         }
411
412         return priv->fifo[ch][tail].desc;
413 }
414
415 /*
416  * user diagnostics; report root cause of error based on execution unit status
417  */
418 static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
419 {
420         struct talitos_private *priv = dev_get_drvdata(dev);
421         int i;
422
423         switch (desc->hdr & DESC_HDR_SEL0_MASK) {
424         case DESC_HDR_SEL0_AFEU:
425                 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
426                         in_be32(priv->reg + TALITOS_AFEUISR),
427                         in_be32(priv->reg + TALITOS_AFEUISR_LO));
428                 break;
429         case DESC_HDR_SEL0_DEU:
430                 dev_err(dev, "DEUISR 0x%08x_%08x\n",
431                         in_be32(priv->reg + TALITOS_DEUISR),
432                         in_be32(priv->reg + TALITOS_DEUISR_LO));
433                 break;
434         case DESC_HDR_SEL0_MDEUA:
435         case DESC_HDR_SEL0_MDEUB:
436                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
437                         in_be32(priv->reg + TALITOS_MDEUISR),
438                         in_be32(priv->reg + TALITOS_MDEUISR_LO));
439                 break;
440         case DESC_HDR_SEL0_RNG:
441                 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
442                         in_be32(priv->reg + TALITOS_RNGUISR),
443                         in_be32(priv->reg + TALITOS_RNGUISR_LO));
444                 break;
445         case DESC_HDR_SEL0_PKEU:
446                 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
447                         in_be32(priv->reg + TALITOS_PKEUISR),
448                         in_be32(priv->reg + TALITOS_PKEUISR_LO));
449                 break;
450         case DESC_HDR_SEL0_AESU:
451                 dev_err(dev, "AESUISR 0x%08x_%08x\n",
452                         in_be32(priv->reg + TALITOS_AESUISR),
453                         in_be32(priv->reg + TALITOS_AESUISR_LO));
454                 break;
455         case DESC_HDR_SEL0_CRCU:
456                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
457                         in_be32(priv->reg + TALITOS_CRCUISR),
458                         in_be32(priv->reg + TALITOS_CRCUISR_LO));
459                 break;
460         case DESC_HDR_SEL0_KEU:
461                 dev_err(dev, "KEUISR 0x%08x_%08x\n",
462                         in_be32(priv->reg + TALITOS_KEUISR),
463                         in_be32(priv->reg + TALITOS_KEUISR_LO));
464                 break;
465         }
466
467         switch (desc->hdr & DESC_HDR_SEL1_MASK) {
468         case DESC_HDR_SEL1_MDEUA:
469         case DESC_HDR_SEL1_MDEUB:
470                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
471                         in_be32(priv->reg + TALITOS_MDEUISR),
472                         in_be32(priv->reg + TALITOS_MDEUISR_LO));
473                 break;
474         case DESC_HDR_SEL1_CRCU:
475                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
476                         in_be32(priv->reg + TALITOS_CRCUISR),
477                         in_be32(priv->reg + TALITOS_CRCUISR_LO));
478                 break;
479         }
480
481         for (i = 0; i < 8; i++)
482                 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
483                         in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
484                         in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
485 }
486
487 /*
488  * recover from error interrupts
489  */
490 static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
491 {
492         struct device *dev = (struct device *)data;
493         struct talitos_private *priv = dev_get_drvdata(dev);
494         unsigned int timeout = TALITOS_TIMEOUT;
495         int ch, error, reset_dev = 0, reset_ch = 0;
496         u32 v, v_lo;
497
498         for (ch = 0; ch < priv->num_channels; ch++) {
499                 /* skip channels without errors */
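                /* editor's note: the ISR carries two bits per channel --
                 * done at bit (2 * ch), error at bit (2 * ch + 1); the done
                 * bit is what flush_channel() acks via TALITOS_ICR above.
                 */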
500                 if (!(isr & (1 << (ch * 2 + 1))))
501                         continue;
502
503                 error = -EINVAL;
504
505                 v = in_be32(priv->reg + TALITOS_CCPSR(ch));
506                 v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
507
508                 if (v_lo & TALITOS_CCPSR_LO_DOF) {
509                         dev_err(dev, "double fetch fifo overflow error\n");
510                         error = -EAGAIN;
511                         reset_ch = 1;
512                 }
513                 if (v_lo & TALITOS_CCPSR_LO_SOF) {
514                         /* h/w dropped descriptor */
515                         dev_err(dev, "single fetch fifo overflow error\n");
516                         error = -EAGAIN;
517                 }
518                 if (v_lo & TALITOS_CCPSR_LO_MDTE)
519                         dev_err(dev, "master data transfer error\n");
520                 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
521                         dev_err(dev, "s/g data length zero error\n");
522                 if (v_lo & TALITOS_CCPSR_LO_FPZ)
523                         dev_err(dev, "fetch pointer zero error\n");
524                 if (v_lo & TALITOS_CCPSR_LO_IDH)
525                         dev_err(dev, "illegal descriptor header error\n");
526                 if (v_lo & TALITOS_CCPSR_LO_IEU)
527                         dev_err(dev, "invalid execution unit error\n");
528                 if (v_lo & TALITOS_CCPSR_LO_EU)
529                         report_eu_error(dev, ch, current_desc(dev, ch));
530                 if (v_lo & TALITOS_CCPSR_LO_GB)
531                         dev_err(dev, "gather boundary error\n");
532                 if (v_lo & TALITOS_CCPSR_LO_GRL)
533                         dev_err(dev, "gather return/length error\n");
534                 if (v_lo & TALITOS_CCPSR_LO_SB)
535                         dev_err(dev, "scatter boundary error\n");
536                 if (v_lo & TALITOS_CCPSR_LO_SRL)
537                         dev_err(dev, "scatter return/length error\n");
538
539                 flush_channel(dev, ch, error, reset_ch);
540
541                 if (reset_ch) {
542                         reset_channel(dev, ch);
543                 } else {
544                         setbits32(priv->reg + TALITOS_CCCR(ch),
545                                   TALITOS_CCCR_CONT);
546                         setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
547                         while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
548                                TALITOS_CCCR_CONT) && --timeout)
549                                 cpu_relax();
550                         if (timeout == 0) {
551                                 dev_err(dev, "failed to restart channel %d\n",
552                                         ch);
553                                 reset_dev = 1;
554                         }
555                 }
556         }
557         if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
558                 dev_err(dev, "done overflow, internal time out, or rngu error: "
559                         "ISR 0x%08x_%08x\n", isr, isr_lo);
560
561                 /* purge request queues */
562                 for (ch = 0; ch < priv->num_channels; ch++)
563                         flush_channel(dev, ch, -EIO, 1);
564
565                 /* reset and reinitialize the device */
566                 init_device(dev);
567         }
568 }
569
570 static irqreturn_t talitos_interrupt(int irq, void *data)
571 {
572         struct device *dev = data;
573         struct talitos_private *priv = dev_get_drvdata(dev);
574         u32 isr, isr_lo;
575
576         isr = in_be32(priv->reg + TALITOS_ISR);
577         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
578
579         if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo)) {
580                 /*
581                  * Acknowledge error interrupts here.
582                  * Done interrupts are ack'ed as part of done_task.
583                  */
584                 out_be32(priv->reg + TALITOS_ICR, isr);
585                 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
586
587                 talitos_error((unsigned long)data, isr, isr_lo);
588         } else
589                 if (likely(isr & TALITOS_ISR_CHDONE)) {
590                         /* mask further done interrupts. */
591                         clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
592                         /* done_task will unmask done interrupts at exit */
593                         tasklet_schedule(&priv->done_task);
594                 }
595
596         return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
597 }
598
599 /*
600  * hwrng
601  */
602 static int talitos_rng_data_present(struct hwrng *rng, int wait)
603 {
604         struct device *dev = (struct device *)rng->priv;
605         struct talitos_private *priv = dev_get_drvdata(dev);
606         u32 ofl;
607         int i;
608
609         for (i = 0; i < 20; i++) {
610                 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
611                       TALITOS_RNGUSR_LO_OFL;
612                 if (ofl || !wait)
613                         break;
614                 udelay(10);
615         }
616
617         return !!ofl;
618 }
619
620 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
621 {
622         struct device *dev = (struct device *)rng->priv;
623         struct talitos_private *priv = dev_get_drvdata(dev);
624
625         /* rng fifo requires 64-bit accesses */
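        /* editor's note: both halves are read but only the low word is
         * returned -- the first in_be32() result is overwritten; the pair
         * of reads just satisfies the 64-bit access requirement above.
         */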
626         *data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
627         *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
628
629         return sizeof(u32);
630 }
631
632 static int talitos_rng_init(struct hwrng *rng)
633 {
634         struct device *dev = (struct device *)rng->priv;
635         struct talitos_private *priv = dev_get_drvdata(dev);
636         unsigned int timeout = TALITOS_TIMEOUT;
637
638         setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
639         while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
640                && --timeout)
641                 cpu_relax();
642         if (timeout == 0) {
643                 dev_err(dev, "failed to reset rng hw\n");
644                 return -ENODEV;
645         }
646
647         /* start generating */
648         setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
649
650         return 0;
651 }
652
653 static int talitos_register_rng(struct device *dev)
654 {
655         struct talitos_private *priv = dev_get_drvdata(dev);
656
657         priv->rng.name          = dev_driver_string(dev);
658         priv->rng.init          = talitos_rng_init;
659         priv->rng.data_present  = talitos_rng_data_present;
660         priv->rng.data_read     = talitos_rng_data_read;
661         priv->rng.priv          = (unsigned long)dev;
662
663         return hwrng_register(&priv->rng);
664 }
665
666 static void talitos_unregister_rng(struct device *dev)
667 {
668         struct talitos_private *priv = dev_get_drvdata(dev);
669
670         hwrng_unregister(&priv->rng);
671 }
672
673 /*
674  * crypto alg
675  */
676 #define TALITOS_CRA_PRIORITY            3000
677 #define TALITOS_MAX_KEY_SIZE            64
678 #define TALITOS_MAX_IV_LENGTH           16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
679
680 #define MD5_DIGEST_SIZE   16
681
682 struct talitos_ctx {
683         struct device *dev;
684         __be32 desc_hdr_template;
685         u8 key[TALITOS_MAX_KEY_SIZE];
686         u8 iv[TALITOS_MAX_IV_LENGTH];
687         unsigned int keylen;
688         unsigned int enckeylen;
689         unsigned int authkeylen;
690         unsigned int authsize;
691 };
692
693 static int aead_authenc_setauthsize(struct crypto_aead *authenc,
694                                                  unsigned int authsize)
695 {
696         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
697
698         ctx->authsize = authsize;
699
700         return 0;
701 }
702
703 static int aead_authenc_setkey(struct crypto_aead *authenc,
704                                             const u8 *key, unsigned int keylen)
705 {
706         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
707         struct rtattr *rta = (void *)key;
708         struct crypto_authenc_key_param *param;
709         unsigned int authkeylen;
710         unsigned int enckeylen;
711
712         if (!RTA_OK(rta, keylen))
713                 goto badkey;
714
715         if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
716                 goto badkey;
717
718         if (RTA_PAYLOAD(rta) < sizeof(*param))
719                 goto badkey;
720
721         param = RTA_DATA(rta);
722         enckeylen = be32_to_cpu(param->enckeylen);
723
724         key += RTA_ALIGN(rta->rta_len);
725         keylen -= RTA_ALIGN(rta->rta_len);
726
727         if (keylen < enckeylen)
728                 goto badkey;
729
730         authkeylen = keylen - enckeylen;
731
732         if (keylen > TALITOS_MAX_KEY_SIZE)
733                 goto badkey;
734
735         memcpy(&ctx->key, key, keylen);
736
737         ctx->keylen = keylen;
738         ctx->enckeylen = enckeylen;
739         ctx->authkeylen = authkeylen;
740
741         return 0;
742
743 badkey:
744         crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
745         return -EINVAL;
746 }
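/*
 * Editor's note: after a successful setkey, ctx->key holds the two keys
 * back to back -- the authentication key in the first authkeylen bytes,
 * followed by enckeylen bytes of cipher key.  ipsec_esp() later points
 * descriptor ptr[0] at the start of ctx->key and ptr[3] at
 * (char *)&ctx->key + ctx->authkeylen accordingly.
 */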
747
748 /*
749  * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
750  * @src_nents: number of segments in input scatterlist
751  * @dst_nents: number of segments in output scatterlist
752  * @dma_len: length of dma mapped link_tbl space
753  * @dma_link_tbl: bus physical address of link_tbl
754  * @desc: h/w descriptor
755  * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
756  *
757  * if decrypting (with authcheck), or if either src_nents or dst_nents
758  * is greater than 1, an integrity check value is concatenated to the end
759  * of the link_tbl data
760  */
761 struct ipsec_esp_edesc {
762         int src_nents;
763         int dst_nents;
764         int dma_len;
765         dma_addr_t dma_link_tbl;
766         struct talitos_desc desc;
767         struct talitos_ptr link_tbl[0];
768 };
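/*
 * Editor's note: when dma_len is nonzero, the link_tbl[] area is used
 * roughly as follows (see ipsec_esp_edesc_alloc() and ipsec_esp()):
 *
 *	link_tbl[0 ...]                          input (gather) table
 *	link_tbl[src_nents + 1 ...]              output (scatter) table, plus
 *	                                         one extra entry for the ICV
 *	&link_tbl[src_nents + dst_nents + 2]     authsize bytes of ICV data
 */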
769
770 static void ipsec_esp_unmap(struct device *dev,
771                             struct ipsec_esp_edesc *edesc,
772                             struct aead_request *areq)
773 {
774         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
775         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
776         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
777         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
778
779         dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
780
781         if (areq->src != areq->dst) {
782                 dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
783                              DMA_TO_DEVICE);
784                 dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
785                              DMA_FROM_DEVICE);
786         } else {
787                 dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
788                              DMA_BIDIRECTIONAL);
789         }
790
791         if (edesc->dma_len)
792                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
793                                  DMA_BIDIRECTIONAL);
794 }
795
796 /*
797  * ipsec_esp descriptor callbacks
798  */
799 static void ipsec_esp_encrypt_done(struct device *dev,
800                                    struct talitos_desc *desc, void *context,
801                                    int err)
802 {
803         struct aead_request *areq = context;
804         struct ipsec_esp_edesc *edesc =
805                  container_of(desc, struct ipsec_esp_edesc, desc);
806         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
807         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
808         struct scatterlist *sg;
809         void *icvdata;
810
811         ipsec_esp_unmap(dev, edesc, areq);
812
813         /* copy the generated ICV to dst */
814         if (edesc->dma_len) {
815                 icvdata = &edesc->link_tbl[edesc->src_nents +
816                                            edesc->dst_nents + 2];
817                 sg = sg_last(areq->dst, edesc->dst_nents);
818                 memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
819                        icvdata, ctx->authsize);
820         }
821
822         kfree(edesc);
823
824         aead_request_complete(areq, err);
825 }
826
827 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
828                                    struct talitos_desc *desc, void *context,
829                                    int err)
830 {
831         struct aead_request *req = context;
832         struct ipsec_esp_edesc *edesc =
833                  container_of(desc, struct ipsec_esp_edesc, desc);
834         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
835         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
836         struct scatterlist *sg;
837         void *icvdata;
838
839         ipsec_esp_unmap(dev, edesc, req);
840
841         if (!err) {
842                 /* auth check */
843                 if (edesc->dma_len)
844                         icvdata = &edesc->link_tbl[edesc->src_nents +
845                                                    edesc->dst_nents + 2];
846                 else
847                         icvdata = &edesc->link_tbl[0];
848
849                 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
850                 err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
851                              ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
852         }
853
854         kfree(edesc);
855
856         aead_request_complete(req, err);
857 }
858
859 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
860                                    struct talitos_desc *desc, void *context,
861                                    int err)
862 {
863         struct aead_request *req = context;
864         struct ipsec_esp_edesc *edesc =
865                  container_of(desc, struct ipsec_esp_edesc, desc);
866
867         ipsec_esp_unmap(dev, edesc, req);
868
869         /* check ICV auth status */
870         if (!err)
871                 if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
872                     DESC_HDR_LO_ICCR1_PASS)
873                         err = -EBADMSG;
874
875         kfree(edesc);
876
877         aead_request_complete(req, err);
878 }
879
880 /*
881  * convert scatterlist to SEC h/w link table format
882  * stop at cryptlen bytes
883  */
884 static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
885                            int cryptlen, struct talitos_ptr *link_tbl_ptr)
886 {
887         int n_sg = sg_count;
888
889         while (n_sg--) {
890                 link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
891                 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
892                 link_tbl_ptr->j_extent = 0;
893                 link_tbl_ptr++;
894                 cryptlen -= sg_dma_len(sg);
895                 sg = sg_next(sg);
896         }
897
898         /* trim trailing entries (zeroing whole ones if needed) so the table totals exactly cryptlen */
899         link_tbl_ptr--;
900         while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
901                 /* Empty this entry, and move to previous one */
902                 cryptlen += be16_to_cpu(link_tbl_ptr->len);
903                 link_tbl_ptr->len = 0;
904                 sg_count--;
905                 link_tbl_ptr--;
906         }
907         link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
908                                         + cryptlen);
909
910         /* tag end of link table */
911         link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
912
913         return sg_count;
914 }
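/*
 * Editor's note: a worked example of the length fixup above -- with two
 * 64-byte segments and cryptlen == 100, the loop leaves cryptlen at
 * 100 - 64 - 64 = -28; the trailing while() does not empty any entry
 * (64 > 28), and the last entry's len becomes 64 + (-28) = 36, so the
 * table describes exactly 100 bytes.
 */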
915
916 /*
917  * fill in and submit ipsec_esp descriptor
918  */
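/*
 * Editor's note: descriptor pointer slots as filled in below --
 *   ptr[0] hmac key, ptr[1] hmac/assoc data, ptr[2] cipher iv,
 *   ptr[3] cipher key, ptr[4] cipher in, ptr[5] cipher out, ptr[6] iv out
 */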
919 static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
920                      u8 *giv, u64 seq,
921                      void (*callback) (struct device *dev,
922                                        struct talitos_desc *desc,
923                                        void *context, int error))
924 {
925         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
926         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
927         struct device *dev = ctx->dev;
928         struct talitos_desc *desc = &edesc->desc;
929         unsigned int cryptlen = areq->cryptlen;
930         unsigned int authsize = ctx->authsize;
931         unsigned int ivsize;
932         int sg_count, ret;
933         int sg_link_tbl_len;
934
935         /* hmac key */
936         map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
937                                0, DMA_TO_DEVICE);
938         /* hmac data */
939         map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) -
940                                sg_virt(areq->assoc), sg_virt(areq->assoc), 0,
941                                DMA_TO_DEVICE);
942         /* cipher iv */
943         ivsize = crypto_aead_ivsize(aead);
944         map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
945                                DMA_TO_DEVICE);
946
947         /* cipher key */
948         map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
949                                (char *)&ctx->key + ctx->authkeylen, 0,
950                                DMA_TO_DEVICE);
951
952         /*
953          * cipher in
954          * map and adjust cipher len to aead request cryptlen.
955          * extent is bytes of HMAC appended after the ciphertext,
956          * typically 12 for ipsec
957          */
958         desc->ptr[4].len = cpu_to_be16(cryptlen);
959         desc->ptr[4].j_extent = authsize;
960
961         if (areq->src == areq->dst)
962                 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
963                                       DMA_BIDIRECTIONAL);
964         else
965                 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
966                                       DMA_TO_DEVICE);
967
968         if (sg_count == 1) {
969                 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
970         } else {
971                 sg_link_tbl_len = cryptlen;
972
973                 if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) &&
974                         (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
975                         sg_link_tbl_len = cryptlen + authsize;
976                 }
977                 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
978                                           &edesc->link_tbl[0]);
979                 if (sg_count > 1) {
980                         desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
981                         desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
982                         dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
983                                                    edesc->dma_len, DMA_BIDIRECTIONAL);
984                 } else {
985                         /* Only one segment now, so no link tbl needed */
986                         desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
987                 }
988         }
989
990         /* cipher out */
991         desc->ptr[5].len = cpu_to_be16(cryptlen);
992         desc->ptr[5].j_extent = authsize;
993
994         if (areq->src != areq->dst) {
995                 sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
996                                       DMA_FROM_DEVICE);
997         }
998
999         if (sg_count == 1) {
1000                 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
1001         } else {
1002                 struct talitos_ptr *link_tbl_ptr =
1003                         &edesc->link_tbl[edesc->src_nents + 1];
1004
1005                 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
1006                                                edesc->dma_link_tbl +
1007                                                edesc->src_nents + 1);
1008                 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1009                                           link_tbl_ptr);
1010
1011                 /* Add an entry to the link table for ICV data */
1012                 link_tbl_ptr += sg_count - 1;
1013                 link_tbl_ptr->j_extent = 0;
1014                 sg_count++;
1015                 link_tbl_ptr++;
1016                 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1017                 link_tbl_ptr->len = cpu_to_be16(authsize);
1018
1019                 /* icv data follows link tables */
1020                 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
1021                                                 edesc->dma_link_tbl +
1022                                                 edesc->src_nents +
1023                                                 edesc->dst_nents + 2);
1024
1025                 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1026                 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1027                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1028         }
1029
1030         /* iv out */
1031         map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
1032                                DMA_FROM_DEVICE);
1033
1034         ret = talitos_submit(dev, desc, callback, areq);
1035         if (ret != -EINPROGRESS) {
1036                 ipsec_esp_unmap(dev, edesc, areq);
1037                 kfree(edesc);
1038         }
1039         return ret;
1040 }
1041
1042
1043 /*
1044  * derive number of elements in scatterlist
1045  */
1046 static int sg_count(struct scatterlist *sg_list, int nbytes)
1047 {
1048         struct scatterlist *sg = sg_list;
1049         int sg_nents = 0;
1050
1051         while (nbytes) {
1052                 sg_nents++;
1053                 nbytes -= sg->length;
1054                 sg = sg_next(sg);
1055         }
1056
1057         return sg_nents;
1058 }
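/*
 * Editor's note: callers collapse a single-entry result to 0 (see
 * ipsec_esp_edesc_alloc() below) so that one-segment requests take the
 * direct-pointer path and need no link table.
 */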
1059
1060 /*
1061  * allocate and map the ipsec_esp extended descriptor
1062  */
1063 static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
1064                                                      int icv_stashing)
1065 {
1066         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1067         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1068         struct ipsec_esp_edesc *edesc;
1069         int src_nents, dst_nents, alloc_len, dma_len;
1070         gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1071                       GFP_ATOMIC;
1072
1073         if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
1074                 dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
1075                 return ERR_PTR(-EINVAL);
1076         }
1077
1078         src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize);
1079         src_nents = (src_nents == 1) ? 0 : src_nents;
1080
1081         if (areq->dst == areq->src) {
1082                 dst_nents = src_nents;
1083         } else {
1084                 dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
1085                 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1086         }
1087
1088         /*
1089          * allocate space for base edesc plus the link tables,
1090          * allowing for two separate entries for ICV and generated ICV (+ 2),
1091          * and the ICV data itself
1092          */
1093         alloc_len = sizeof(struct ipsec_esp_edesc);
1094         if (src_nents || dst_nents) {
1095                 dma_len = (src_nents + dst_nents + 2) *
1096                                  sizeof(struct talitos_ptr) + ctx->authsize;
1097                 alloc_len += dma_len;
1098         } else {
1099                 dma_len = 0;
1100                 alloc_len += icv_stashing ? ctx->authsize : 0;
1101         }
1102
1103         edesc = kmalloc(alloc_len, GFP_DMA | flags);
1104         if (!edesc) {
1105                 dev_err(ctx->dev, "could not allocate edescriptor\n");
1106                 return ERR_PTR(-ENOMEM);
1107         }
1108
1109         edesc->src_nents = src_nents;
1110         edesc->dst_nents = dst_nents;
1111         edesc->dma_len = dma_len;
1112         edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0],
1113                                              edesc->dma_len, DMA_BIDIRECTIONAL);
1114
1115         return edesc;
1116 }
1117
1118 static int aead_authenc_encrypt(struct aead_request *req)
1119 {
1120         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1121         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1122         struct ipsec_esp_edesc *edesc;
1123
1124         /* allocate extended descriptor */
1125         edesc = ipsec_esp_edesc_alloc(req, 0);
1126         if (IS_ERR(edesc))
1127                 return PTR_ERR(edesc);
1128
1129         /* set encrypt */
1130         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1131
1132         return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1133 }
1134
1135
1136
1137 static int aead_authenc_decrypt(struct aead_request *req)
1138 {
1139         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1140         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1141         unsigned int authsize = ctx->authsize;
1142         struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1143         struct ipsec_esp_edesc *edesc;
1144         struct scatterlist *sg;
1145         void *icvdata;
1146
1147         req->cryptlen -= authsize;
1148
1149         /* allocate extended descriptor */
1150         edesc = ipsec_esp_edesc_alloc(req, 1);
1151         if (IS_ERR(edesc))
1152                 return PTR_ERR(edesc);
1153
1154         if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1155             (((!edesc->src_nents && !edesc->dst_nents) ||
1156                 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) {
1157
1158                 /* decrypt and check the ICV */
1159                 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND |
1160                                   DESC_HDR_MODE1_MDEU_CICV;
1161
1162                 /* reset integrity check result bits */
1163                 edesc->desc.hdr_lo = 0;
1164
1165                 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done);
1166
1167         } else {
1168
1169                 /* Have to check the ICV with software */
1170
1171                 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1172
1173                 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1174                 if (edesc->dma_len)
1175                         icvdata = &edesc->link_tbl[edesc->src_nents +
1176                                                    edesc->dst_nents + 2];
1177                 else
1178                         icvdata = &edesc->link_tbl[0];
1179
1180                 sg = sg_last(req->src, edesc->src_nents ? : 1);
1181
1182                 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1183                        ctx->authsize);
1184
1185                 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
1186         }
1187 }
1188
1189 static int aead_authenc_givencrypt(
1190         struct aead_givcrypt_request *req)
1191 {
1192         struct aead_request *areq = &req->areq;
1193         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1194         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1195         struct ipsec_esp_edesc *edesc;
1196
1197         /* allocate extended descriptor */
1198         edesc = ipsec_esp_edesc_alloc(areq, 0);
1199         if (IS_ERR(edesc))
1200                 return PTR_ERR(edesc);
1201
1202         /* set encrypt */
1203         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1204
1205         memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1206         /* avoid consecutive packets going out with same IV */
1207         *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1208
1209         return ipsec_esp(edesc, areq, req->giv, req->seq,
1210                          ipsec_esp_encrypt_done);
1211 }
1212
1213 struct talitos_alg_template {
1214         char name[CRYPTO_MAX_ALG_NAME];
1215         char driver_name[CRYPTO_MAX_ALG_NAME];
1216         unsigned int blocksize;
1217         struct aead_alg aead;
1218         struct device *dev;
1219         __be32 desc_hdr_template;
1220 };
1221
1222 static struct talitos_alg_template driver_algs[] = {
1223         /* single-pass ipsec_esp descriptor */
1224         {
1225                 .name = "authenc(hmac(sha1),cbc(aes))",
1226                 .driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1227                 .blocksize = AES_BLOCK_SIZE,
1228                 .aead = {
1229                         .setkey = aead_authenc_setkey,
1230                         .setauthsize = aead_authenc_setauthsize,
1231                         .encrypt = aead_authenc_encrypt,
1232                         .decrypt = aead_authenc_decrypt,
1233                         .givencrypt = aead_authenc_givencrypt,
1234                         .geniv = "<built-in>",
1235                         .ivsize = AES_BLOCK_SIZE,
1236                         .maxauthsize = SHA1_DIGEST_SIZE,
1237                         },
1238                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1239                                      DESC_HDR_SEL0_AESU |
1240                                      DESC_HDR_MODE0_AESU_CBC |
1241                                      DESC_HDR_SEL1_MDEUA |
1242                                      DESC_HDR_MODE1_MDEU_INIT |
1243                                      DESC_HDR_MODE1_MDEU_PAD |
1244                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1245         },
1246         {
1247                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
1248                 .driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1249                 .blocksize = DES3_EDE_BLOCK_SIZE,
1250                 .aead = {
1251                         .setkey = aead_authenc_setkey,
1252                         .setauthsize = aead_authenc_setauthsize,
1253                         .encrypt = aead_authenc_encrypt,
1254                         .decrypt = aead_authenc_decrypt,
1255                         .givencrypt = aead_authenc_givencrypt,
1256                         .geniv = "<built-in>",
1257                         .ivsize = DES3_EDE_BLOCK_SIZE,
1258                         .maxauthsize = SHA1_DIGEST_SIZE,
1259                         },
1260                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1261                                      DESC_HDR_SEL0_DEU |
1262                                      DESC_HDR_MODE0_DEU_CBC |
1263                                      DESC_HDR_MODE0_DEU_3DES |
1264                                      DESC_HDR_SEL1_MDEUA |
1265                                      DESC_HDR_MODE1_MDEU_INIT |
1266                                      DESC_HDR_MODE1_MDEU_PAD |
1267                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1268         },
1269         {
1270                 .name = "authenc(hmac(sha256),cbc(aes))",
1271                 .driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
1272                 .blocksize = AES_BLOCK_SIZE,
1273                 .aead = {
1274                         .setkey = aead_authenc_setkey,
1275                         .setauthsize = aead_authenc_setauthsize,
1276                         .encrypt = aead_authenc_encrypt,
1277                         .decrypt = aead_authenc_decrypt,
1278                         .givencrypt = aead_authenc_givencrypt,
1279                         .geniv = "<built-in>",
1280                         .ivsize = AES_BLOCK_SIZE,
1281                         .maxauthsize = SHA256_DIGEST_SIZE,
1282                         },
1283                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1284                                      DESC_HDR_SEL0_AESU |
1285                                      DESC_HDR_MODE0_AESU_CBC |
1286                                      DESC_HDR_SEL1_MDEUA |
1287                                      DESC_HDR_MODE1_MDEU_INIT |
1288                                      DESC_HDR_MODE1_MDEU_PAD |
1289                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1290         },
1291         {
1292                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
1293                 .driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
1294                 .blocksize = DES3_EDE_BLOCK_SIZE,
1295                 .aead = {
1296                         .setkey = aead_authenc_setkey,
1297                         .setauthsize = aead_authenc_setauthsize,
1298                         .encrypt = aead_authenc_encrypt,
1299                         .decrypt = aead_authenc_decrypt,
1300                         .givencrypt = aead_authenc_givencrypt,
1301                         .geniv = "<built-in>",
1302                         .ivsize = DES3_EDE_BLOCK_SIZE,
1303                         .maxauthsize = SHA256_DIGEST_SIZE,
1304                         },
1305                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1306                                      DESC_HDR_SEL0_DEU |
1307                                      DESC_HDR_MODE0_DEU_CBC |
1308                                      DESC_HDR_MODE0_DEU_3DES |
1309                                      DESC_HDR_SEL1_MDEUA |
1310                                      DESC_HDR_MODE1_MDEU_INIT |
1311                                      DESC_HDR_MODE1_MDEU_PAD |
1312                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1313         },
1314         {
1315                 .name = "authenc(hmac(md5),cbc(aes))",
1316                 .driver_name = "authenc-hmac-md5-cbc-aes-talitos",
1317                 .blocksize = AES_BLOCK_SIZE,
1318                 .aead = {
1319                         .setkey = aead_authenc_setkey,
1320                         .setauthsize = aead_authenc_setauthsize,
1321                         .encrypt = aead_authenc_encrypt,
1322                         .decrypt = aead_authenc_decrypt,
1323                         .givencrypt = aead_authenc_givencrypt,
1324                         .geniv = "<built-in>",
1325                         .ivsize = AES_BLOCK_SIZE,
1326                         .maxauthsize = MD5_DIGEST_SIZE,
1327                         },
1328                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1329                                      DESC_HDR_SEL0_AESU |
1330                                      DESC_HDR_MODE0_AESU_CBC |
1331                                      DESC_HDR_SEL1_MDEUA |
1332                                      DESC_HDR_MODE1_MDEU_INIT |
1333                                      DESC_HDR_MODE1_MDEU_PAD |
1334                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
1335         },
1336         {
1337                 .name = "authenc(hmac(md5),cbc(des3_ede))",
1338                 .driver_name = "authenc-hmac-md5-cbc-3des-talitos",
1339                 .blocksize = DES3_EDE_BLOCK_SIZE,
1340                 .aead = {
1341                         .setkey = aead_authenc_setkey,
1342                         .setauthsize = aead_authenc_setauthsize,
1343                         .encrypt = aead_authenc_encrypt,
1344                         .decrypt = aead_authenc_decrypt,
1345                         .givencrypt = aead_authenc_givencrypt,
1346                         .geniv = "<built-in>",
1347                         .ivsize = DES3_EDE_BLOCK_SIZE,
1348                         .maxauthsize = MD5_DIGEST_SIZE,
1349                         },
1350                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1351                                      DESC_HDR_SEL0_DEU |
1352                                      DESC_HDR_MODE0_DEU_CBC |
1353                                      DESC_HDR_MODE0_DEU_3DES |
1354                                      DESC_HDR_SEL1_MDEUA |
1355                                      DESC_HDR_MODE1_MDEU_INIT |
1356                                      DESC_HDR_MODE1_MDEU_PAD |
1357                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
1358         }
1359 };
1360
1361 struct talitos_crypto_alg {
1362         struct list_head entry;
1363         struct device *dev;
1364         __be32 desc_hdr_template;
1365         struct crypto_alg crypto_alg;
1366 };
1367
1368 static int talitos_cra_init(struct crypto_tfm *tfm)
1369 {
1370         struct crypto_alg *alg = tfm->__crt_alg;
1371         struct talitos_crypto_alg *talitos_alg =
1372                  container_of(alg, struct talitos_crypto_alg, crypto_alg);
1373         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
1374
1375         /* update context with ptr to dev */
1376         ctx->dev = talitos_alg->dev;
1377         /* copy descriptor header template value */
1378         ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
1379
1380         /* random first IV */
1381         get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
1382
1383         return 0;
1384 }
1385
1386 /*
1387  * given the alg's descriptor header template, determine whether descriptor
1388  * type and primary/secondary execution units required match the hw
1389  * capabilities description provided in the device tree node.
1390  */
1391 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
1392 {
1393         struct talitos_private *priv = dev_get_drvdata(dev);
1394         int ret;
1395
1396         ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
1397               (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
1398
1399         if (SECONDARY_EU(desc_hdr_template))
1400                 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
1401                               & priv->exec_units);
1402
1403         return ret;
1404 }
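/*
 * For example, the "authenc(hmac(md5),cbc(des3_ede))" template above
 * requires the IPSEC_ESP descriptor type plus the DEU (primary) and
 * MDEU (secondary) execution units; each corresponding bit must be set
 * in the fsl,descriptor-types-mask and fsl,exec-units-mask properties
 * for the algorithm to be registered.
 */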
1405
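/*
 * talitos_remove() doubles as the error-unwind path for talitos_probe()
 * (see the err_out label there), so it must tolerate a partially
 * initialized talitos_private: kfree(NULL) is a no-op, the per-channel
 * FIFOs are only walked when priv->fifo was allocated, and the IRQ is
 * only released when it was successfully mapped.
 */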
1406 static int talitos_remove(struct of_device *ofdev)
1407 {
1408         struct device *dev = &ofdev->dev;
1409         struct talitos_private *priv = dev_get_drvdata(dev);
1410         struct talitos_crypto_alg *t_alg, *n;
1411         int i;
1412
1413         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
1414                 crypto_unregister_alg(&t_alg->crypto_alg);
1415                 list_del(&t_alg->entry);
1416                 kfree(t_alg);
1417         }
1418
1419         if (hw_supports(dev, DESC_HDR_SEL0_RNG))
1420                 talitos_unregister_rng(dev);
1421
1422         kfree(priv->submit_count);
1423         kfree(priv->tail);
1424         kfree(priv->head);
1425
1426         if (priv->fifo)
1427                 for (i = 0; i < priv->num_channels; i++)
1428                         kfree(priv->fifo[i]);
1429
1430         kfree(priv->fifo);
1431         kfree(priv->head_lock);
1432         kfree(priv->tail_lock);
1433
1434         if (priv->irq != NO_IRQ) {
1435                 free_irq(priv->irq, dev);
1436                 irq_dispose_mapping(priv->irq);
1437         }
1438
1439         tasklet_kill(&priv->done_task);
1440
1441         iounmap(priv->reg);
1442
1443         dev_set_drvdata(dev, NULL);
1444
1445         kfree(priv);
1446
1447         return 0;
1448 }
1449
1450 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1451                                                     struct talitos_alg_template
1452                                                            *template)
1453 {
1454         struct talitos_crypto_alg *t_alg;
1455         struct crypto_alg *alg;
1456
1457         t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
1458         if (!t_alg)
1459                 return ERR_PTR(-ENOMEM);
1460
1461         alg = &t_alg->crypto_alg;
1462
1463         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
1464         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1465                  template->driver_name);
1466         alg->cra_module = THIS_MODULE;
1467         alg->cra_init = talitos_cra_init;
1468         alg->cra_priority = TALITOS_CRA_PRIORITY;
1469         alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
1470         alg->cra_blocksize = template->blocksize;
1471         alg->cra_alignmask = 0;
1472         alg->cra_type = &crypto_aead_type;
1473         alg->cra_ctxsize = sizeof(struct talitos_ctx);
1474         alg->cra_u.aead = template->aead;
1475
1476         t_alg->desc_hdr_template = template->desc_hdr_template;
1477         t_alg->dev = dev;
1478
1479         return t_alg;
1480 }
1481
1482 static int talitos_probe(struct of_device *ofdev,
1483                          const struct of_device_id *match)
1484 {
1485         struct device *dev = &ofdev->dev;
1486         struct device_node *np = ofdev->node;
1487         struct talitos_private *priv;
1488         const unsigned int *prop;
1489         int i, err;
1490
1491         priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
1492         if (!priv)
1493                 return -ENOMEM;
1494
1495         dev_set_drvdata(dev, priv);
1496
1497         priv->ofdev = ofdev;
1498
1499         tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
1500
1501         INIT_LIST_HEAD(&priv->alg_list);
1502
1503         priv->irq = irq_of_parse_and_map(np, 0);
1504
1505         if (priv->irq == NO_IRQ) {
1506                 dev_err(dev, "failed to map irq\n");
1507                 err = -EINVAL;
1508                 goto err_out;
1509         }
1510
1511         /* get the irq line */
1512         err = request_irq(priv->irq, talitos_interrupt, 0,
1513                           dev_driver_string(dev), dev);
1514         if (err) {
1515                 dev_err(dev, "failed to request irq %d\n", priv->irq);
1516                 irq_dispose_mapping(priv->irq);
1517                 priv->irq = NO_IRQ;
1518                 goto err_out;
1519         }
1520
1521         priv->reg = of_iomap(np, 0);
1522         if (!priv->reg) {
1523                 dev_err(dev, "failed to of_iomap\n");
1524                 err = -ENOMEM;
1525                 goto err_out;
1526         }
1527
1528         /* get SEC version capabilities from device tree */
1529         prop = of_get_property(np, "fsl,num-channels", NULL);
1530         if (prop)
1531                 priv->num_channels = *prop;
1532
1533         prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
1534         if (prop)
1535                 priv->chfifo_len = *prop;
1536
1537         prop = of_get_property(np, "fsl,exec-units-mask", NULL);
1538         if (prop)
1539                 priv->exec_units = *prop;
1540
1541         prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
1542         if (prop)
1543                 priv->desc_types = *prop;
1544
1545         if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
1546             !priv->exec_units || !priv->desc_types) {
1547                 dev_err(dev, "invalid property data in device tree node\n");
1548                 err = -EINVAL;
1549                 goto err_out;
1550         }
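        /*
         * An SEC node supplying these properties might look roughly like
         * the following (values are illustrative only, not taken from any
         * particular board's device tree):
         *
         *	crypto@30000 {
         *		compatible = "fsl,sec2.0";
         *		fsl,num-channels = <4>;
         *		fsl,channel-fifo-len = <24>;
         *		fsl,exec-units-mask = <0x7e>;
         *		fsl,descriptor-types-mask = <0x01010ebf>;
         *	};
         */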
1551
1552         if (of_device_is_compatible(np, "fsl,sec3.0"))
1553                 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
1554
1555         if (of_device_is_compatible(np, "fsl,sec2.1"))
1556                 priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
1557
1558         priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
1559                                   GFP_KERNEL);
1560         priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
1561                                   GFP_KERNEL);
1562         if (!priv->head_lock || !priv->tail_lock) {
1563                 dev_err(dev, "failed to allocate fifo locks\n");
1564                 err = -ENOMEM;
1565                 goto err_out;
1566         }
1567
1568         for (i = 0; i < priv->num_channels; i++) {
1569                 spin_lock_init(&priv->head_lock[i]);
1570                 spin_lock_init(&priv->tail_lock[i]);
1571         }
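        /*
         * Two locks per channel: head_lock presumably serializes request
         * submission (producers advancing the head index) while tail_lock
         * serializes completion processing (the done path consuming from
         * the tail), so submit and done can run concurrently on the same
         * channel.
         */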
1572
1573         priv->fifo = kmalloc(sizeof(struct talitos_request *) *
1574                              priv->num_channels, GFP_KERNEL);
1575         if (!priv->fifo) {
1576                 dev_err(dev, "failed to allocate request fifo\n");
1577                 err = -ENOMEM;
1578                 goto err_out;
1579         }
1580
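        /*
         * The channel FIFO length is rounded up to a power of two,
         * presumably so the circular head/tail indices can wrap with a
         * cheap "& (fifo_len - 1)" mask in the submit and done paths
         * rather than a modulo.
         */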
1581         priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
1582
1583         for (i = 0; i < priv->num_channels; i++) {
1584                 priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
1585                                         priv->fifo_len, GFP_KERNEL);
1586                 if (!priv->fifo[i]) {
1587                         dev_err(dev, "failed to allocate request fifo %d\n", i);
1588                         err = -ENOMEM;
1589                         goto err_out;
1590                 }
1591         }
1592
1593         priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
1594                                      GFP_KERNEL);
1595         if (!priv->submit_count) {
1596                 dev_err(dev, "failed to allocate fifo submit count space\n");
1597                 err = -ENOMEM;
1598                 goto err_out;
1599         }
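        /*
         * Each channel's submit counter starts at -(chfifo_len - 1) rather
         * than -chfifo_len, so it reaches zero one request before the
         * hardware fetch FIFO is completely full; the submit path can then
         * refuse further requests (and let done processing catch up) before
         * the channel itself would signal an overflow interrupt.
         */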
1600         for (i = 0; i < priv->num_channels; i++)
1601                 atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1));
1602
1603         priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1604         priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1605         if (!priv->head || !priv->tail) {
1606                 dev_err(dev, "failed to allocate request index space\n");
1607                 err = -ENOMEM;
1608                 goto err_out;
1609         }
1610
1611         /* reset and initialize the h/w */
1612         err = init_device(dev);
1613         if (err) {
1614                 dev_err(dev, "failed to initialize device\n");
1615                 goto err_out;
1616         }
1617
1618         /* register the RNG, if available */
1619         if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
1620                 err = talitos_register_rng(dev);
1621                 if (err) {
1622                         dev_err(dev, "failed to register hwrng: %d\n", err);
1623                         goto err_out;
1624                 }
1625                 dev_info(dev, "hwrng\n");
1626         }
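        /*
         * When registration succeeds, the SEC's RNG is exposed through the
         * kernel's hw_random framework (typically consumed via /dev/hwrng
         * or a userspace entropy daemon).
         */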
1627
1628         /* register crypto algorithms the device supports */
1629         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
1630                 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
1631                         struct talitos_crypto_alg *t_alg;
1632
1633                         t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
1634                         if (IS_ERR(t_alg)) {
1635                                 err = PTR_ERR(t_alg);
1636                                 goto err_out;
1637                         }
1638
1639                         err = crypto_register_alg(&t_alg->crypto_alg);
1640                         if (err) {
1641                                 dev_err(dev, "%s alg registration failed\n",
1642                                         t_alg->crypto_alg.cra_driver_name);
1643                                 kfree(t_alg);
1644                         } else {
1645                                 list_add_tail(&t_alg->entry, &priv->alg_list);
1646                                 dev_info(dev, "%s\n",
1647                                          t_alg->crypto_alg.cra_driver_name);
1648                         }
1649                 }
1650         }
1651
1652         return 0;
1653
1654 err_out:
1655         talitos_remove(ofdev);
1656
1657         return err;
1658 }
1659
1660 static struct of_device_id talitos_match[] = {
1661         {
1662                 .compatible = "fsl,sec2.0",
1663         },
1664         {},
1665 };
1666 MODULE_DEVICE_TABLE(of, talitos_match);
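/*
 * Only "fsl,sec2.0" is matched here; device trees for newer SEC revisions
 * are expected to list "fsl,sec2.0" as a compatible fallback, which is why
 * talitos_probe() separately tests for "fsl,sec2.1" and "fsl,sec3.0" when
 * setting feature flags.
 */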
1667
1668 static struct of_platform_driver talitos_driver = {
1669         .name = "talitos",
1670         .match_table = talitos_match,
1671         .probe = talitos_probe,
1672         .remove = talitos_remove,
1673 };
1674
1675 static int __init talitos_init(void)
1676 {
1677         return of_register_platform_driver(&talitos_driver);
1678 }
1679 module_init(talitos_init);
1680
1681 static void __exit talitos_exit(void)
1682 {
1683         of_unregister_platform_driver(&talitos_driver);
1684 }
1685 module_exit(talitos_exit);
1686
1687 MODULE_LICENSE("GPL");
1688 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
1689 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");