/*
 * crypto: talitos - align locks on cache lines
 * [safe/jmp/linux-2.6] drivers/crypto/talitos.c
 */
1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_platform.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/io.h>
38 #include <linux/spinlock.h>
39 #include <linux/rtnetlink.h>
40
41 #include <crypto/algapi.h>
42 #include <crypto/aes.h>
43 #include <crypto/des.h>
44 #include <crypto/sha.h>
45 #include <crypto/aead.h>
46 #include <crypto/authenc.h>
47 #include <crypto/skcipher.h>
48 #include <crypto/scatterwalk.h>
49
50 #include "talitos.h"
51
#define TALITOS_TIMEOUT 100000
#define TALITOS_MAX_DATA_LEN 65535

/* extract the descriptor type / primary EU / secondary EU fields from hdr */
#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)

/*
 * descriptor pointer entry
 * layout is consumed directly by the SEC hardware (descriptors are
 * DMA-mapped and handed to the channel fetch fifo) - do not reorder
 */
struct talitos_ptr {
	__be16 len;	/* length */
	u8 j_extent;	/* jump to sg link table and/or extent */
	u8 eptr;	/* extended address */
	__be32 ptr;	/* address */
};

/* descriptor: header pair plus seven ptr/len pairs, hardware-consumed */
struct talitos_desc {
	__be32 hdr;			/* header high bits */
	__be32 hdr_lo;			/* header low bits */
	struct talitos_ptr ptr[7];	/* ptr/len pair array */
};
73
/**
 * talitos_request - descriptor submission request
 * @desc: descriptor pointer (kernel virtual)
 * @dma_desc: descriptor's physical bus address
 * @callback: whom to call when descriptor processing is done
 * @context: caller context (optional)
 *
 * A non-NULL @desc also marks the fifo slot as occupied: it is written
 * last (after smp_wmb) in talitos_submit() and cleared in flush_channel().
 */
struct talitos_request {
	struct talitos_desc *desc;
	dma_addr_t dma_desc;
	void (*callback) (struct device *dev, struct talitos_desc *desc,
			  void *context, int error);
	void *context;
};
88
/* per-channel fifo management */
struct talitos_channel {
	/* request fifo */
	struct talitos_request *fifo;

	/*
	 * submit_count, the head group and the tail group each start on
	 * their own cache line (____cacheline_aligned) so the submission
	 * path and the completion path don't false-share.
	 */

	/* number of requests pending in channel h/w fifo */
	atomic_t submit_count ____cacheline_aligned;

	/* request submission (head) lock */
	spinlock_t head_lock ____cacheline_aligned;
	/* index to next free descriptor request */
	int head;

	/* request release (tail) lock */
	spinlock_t tail_lock ____cacheline_aligned;
	/* index to next in-progress/done descriptor request */
	int tail;
};
107
/* per-device driver state */
struct talitos_private {
	struct device *dev;		/* generic device handle */
	struct of_device *ofdev;	/* OF (device-tree) device handle */
	void __iomem *reg;		/* mapped SEC register base */
	int irq;

	/* SEC version geometry (from device tree node) */
	unsigned int num_channels;
	unsigned int chfifo_len;
	unsigned int exec_units;
	unsigned int desc_types;

	/* SEC Compatibility info */
	unsigned long features;

	/*
	 * length of the request fifo
	 * fifo_len is chfifo_len rounded up to next power of 2
	 * so we can use bitwise ops to wrap
	 */
	unsigned int fifo_len;

	/* per-channel state, num_channels entries */
	struct talitos_channel *chan;

	/* next channel to be assigned next incoming descriptor */
	atomic_t last_chan ____cacheline_aligned;

	/* request callback tasklet */
	struct tasklet_struct done_task;

	/* list of registered algorithms */
	struct list_head alg_list;

	/* hwrng device */
	struct hwrng rng;
};

/* .features flag */
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
148
149 /*
150  * map virtual single (contiguous) pointer to h/w descriptor pointer
151  */
152 static void map_single_talitos_ptr(struct device *dev,
153                                    struct talitos_ptr *talitos_ptr,
154                                    unsigned short len, void *data,
155                                    unsigned char extent,
156                                    enum dma_data_direction dir)
157 {
158         talitos_ptr->len = cpu_to_be16(len);
159         talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
160         talitos_ptr->j_extent = extent;
161 }
162
163 /*
164  * unmap bus single (contiguous) h/w descriptor pointer
165  */
166 static void unmap_single_talitos_ptr(struct device *dev,
167                                      struct talitos_ptr *talitos_ptr,
168                                      enum dma_data_direction dir)
169 {
170         dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
171                          be16_to_cpu(talitos_ptr->len), dir);
172 }
173
174 static int reset_channel(struct device *dev, int ch)
175 {
176         struct talitos_private *priv = dev_get_drvdata(dev);
177         unsigned int timeout = TALITOS_TIMEOUT;
178
179         setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
180
181         while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
182                && --timeout)
183                 cpu_relax();
184
185         if (timeout == 0) {
186                 dev_err(dev, "failed to reset channel %d\n", ch);
187                 return -EIO;
188         }
189
190         /* set done writeback and IRQ */
191         setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
192                   TALITOS_CCCR_LO_CDIE);
193
194         /* and ICCR writeback, if available */
195         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
196                 setbits32(priv->reg + TALITOS_CCCR_LO(ch),
197                           TALITOS_CCCR_LO_IWSE);
198
199         return 0;
200 }
201
202 static int reset_device(struct device *dev)
203 {
204         struct talitos_private *priv = dev_get_drvdata(dev);
205         unsigned int timeout = TALITOS_TIMEOUT;
206
207         setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
208
209         while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
210                && --timeout)
211                 cpu_relax();
212
213         if (timeout == 0) {
214                 dev_err(dev, "failed to reset device\n");
215                 return -EIO;
216         }
217
218         return 0;
219 }
220
221 /*
222  * Reset and initialize the device
223  */
224 static int init_device(struct device *dev)
225 {
226         struct talitos_private *priv = dev_get_drvdata(dev);
227         int ch, err;
228
229         /*
230          * Master reset
231          * errata documentation: warning: certain SEC interrupts
232          * are not fully cleared by writing the MCR:SWR bit,
233          * set bit twice to completely reset
234          */
235         err = reset_device(dev);
236         if (err)
237                 return err;
238
239         err = reset_device(dev);
240         if (err)
241                 return err;
242
243         /* reset channels */
244         for (ch = 0; ch < priv->num_channels; ch++) {
245                 err = reset_channel(dev, ch);
246                 if (err)
247                         return err;
248         }
249
250         /* enable channel done and error interrupts */
251         setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
252         setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
253
254         /* disable integrity check error interrupts (use writeback instead) */
255         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
256                 setbits32(priv->reg + TALITOS_MDEUICR_LO,
257                           TALITOS_MDEUICR_LO_ICE);
258
259         return 0;
260 }
261
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:        the SEC device to be used
 * @desc:       the descriptor to be processed by the device
 * @callback:   whom to call when processing is complete
 * @context:    a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 *
 * Returns -EINPROGRESS on successful submission, or -EAGAIN when the
 * selected channel's h/w fifo is full.
 */
static int talitos_submit(struct device *dev, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags, ch;
	int head;

	/* select done notification */
	desc->hdr |= DESC_HDR_DONE_NOTIFY;

	/*
	 * emulate SEC's round-robin channel fifo polling scheme
	 * (the mask-based wrap assumes num_channels is a power of 2)
	 */
	ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	/* submit_count doubles as a fifo-full gate: 0 means no room left */
	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head (fifo_len is a power of 2; mask wraps) */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/*
	 * publish the slot last: flush_channel() treats a non-NULL
	 * request->desc as "occupied", so all other fields must be
	 * visible before desc is set
	 */
	smp_wmb();
	request->desc = desc;

	/* GO! — full barrier orders the fifo writes before the MMIO kick */
	wmb();
	out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
321
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	/* non-NULL fifo[].desc marks an occupied slot (see talitos_submit) */
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		/* drop the lock so the callback runs without tail_lock held */
		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		/* re-take the lock and re-read tail before the next pass */
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
379
380 /*
381  * process completed requests for channels that have done status
382  */
383 static void talitos_done(unsigned long data)
384 {
385         struct device *dev = (struct device *)data;
386         struct talitos_private *priv = dev_get_drvdata(dev);
387         int ch;
388
389         for (ch = 0; ch < priv->num_channels; ch++)
390                 flush_channel(dev, ch, 0, 0);
391
392         /* At this point, all completed channels have been processed.
393          * Unmask done interrupts for channels completed later on.
394          */
395         setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
396         setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
397 }
398
399 /*
400  * locate current (offending) descriptor
401  */
402 static struct talitos_desc *current_desc(struct device *dev, int ch)
403 {
404         struct talitos_private *priv = dev_get_drvdata(dev);
405         int tail = priv->chan[ch].tail;
406         dma_addr_t cur_desc;
407
408         cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
409
410         while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
411                 tail = (tail + 1) & (priv->fifo_len - 1);
412                 if (tail == priv->chan[ch].tail) {
413                         dev_err(dev, "couldn't locate current descriptor\n");
414                         return NULL;
415                 }
416         }
417
418         return priv->chan[ch].fifo[tail].desc;
419 }
420
421 /*
422  * user diagnostics; report root cause of error based on execution unit status
423  */
424 static void report_eu_error(struct device *dev, int ch,
425                             struct talitos_desc *desc)
426 {
427         struct talitos_private *priv = dev_get_drvdata(dev);
428         int i;
429
430         switch (desc->hdr & DESC_HDR_SEL0_MASK) {
431         case DESC_HDR_SEL0_AFEU:
432                 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
433                         in_be32(priv->reg + TALITOS_AFEUISR),
434                         in_be32(priv->reg + TALITOS_AFEUISR_LO));
435                 break;
436         case DESC_HDR_SEL0_DEU:
437                 dev_err(dev, "DEUISR 0x%08x_%08x\n",
438                         in_be32(priv->reg + TALITOS_DEUISR),
439                         in_be32(priv->reg + TALITOS_DEUISR_LO));
440                 break;
441         case DESC_HDR_SEL0_MDEUA:
442         case DESC_HDR_SEL0_MDEUB:
443                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
444                         in_be32(priv->reg + TALITOS_MDEUISR),
445                         in_be32(priv->reg + TALITOS_MDEUISR_LO));
446                 break;
447         case DESC_HDR_SEL0_RNG:
448                 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
449                         in_be32(priv->reg + TALITOS_RNGUISR),
450                         in_be32(priv->reg + TALITOS_RNGUISR_LO));
451                 break;
452         case DESC_HDR_SEL0_PKEU:
453                 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
454                         in_be32(priv->reg + TALITOS_PKEUISR),
455                         in_be32(priv->reg + TALITOS_PKEUISR_LO));
456                 break;
457         case DESC_HDR_SEL0_AESU:
458                 dev_err(dev, "AESUISR 0x%08x_%08x\n",
459                         in_be32(priv->reg + TALITOS_AESUISR),
460                         in_be32(priv->reg + TALITOS_AESUISR_LO));
461                 break;
462         case DESC_HDR_SEL0_CRCU:
463                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
464                         in_be32(priv->reg + TALITOS_CRCUISR),
465                         in_be32(priv->reg + TALITOS_CRCUISR_LO));
466                 break;
467         case DESC_HDR_SEL0_KEU:
468                 dev_err(dev, "KEUISR 0x%08x_%08x\n",
469                         in_be32(priv->reg + TALITOS_KEUISR),
470                         in_be32(priv->reg + TALITOS_KEUISR_LO));
471                 break;
472         }
473
474         switch (desc->hdr & DESC_HDR_SEL1_MASK) {
475         case DESC_HDR_SEL1_MDEUA:
476         case DESC_HDR_SEL1_MDEUB:
477                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
478                         in_be32(priv->reg + TALITOS_MDEUISR),
479                         in_be32(priv->reg + TALITOS_MDEUISR_LO));
480                 break;
481         case DESC_HDR_SEL1_CRCU:
482                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
483                         in_be32(priv->reg + TALITOS_CRCUISR),
484                         in_be32(priv->reg + TALITOS_CRCUISR_LO));
485                 break;
486         }
487
488         for (i = 0; i < 8; i++)
489                 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
490                         in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
491                         in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
492 }
493
/*
 * recover from error interrupts
 */
static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
{
	struct device *dev = (struct device *)data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	/*
	 * NOTE(review): reset_ch and timeout are not re-initialized for
	 * each channel iteration below, so a DOF error on an earlier
	 * channel makes later erroring channels get reset too — confirm
	 * whether this carry-over is intended.
	 */
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 v, v_lo;

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors (per-channel error bit) */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		/* decode the channel's pointer status register */
		v = in_be32(priv->reg + TALITOS_CCPSR(ch));
		v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		/* flush pending requests with the chosen error code */
		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			/* try to resume the channel without a full reset */
			setbits32(priv->reg + TALITOS_CCCR(ch),
				  TALITOS_CCCR_CONT);
			setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
			while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
			       TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	/* global errors (or a failed channel restart) force a full re-init */
	if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
			"ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
576
577 static irqreturn_t talitos_interrupt(int irq, void *data)
578 {
579         struct device *dev = data;
580         struct talitos_private *priv = dev_get_drvdata(dev);
581         u32 isr, isr_lo;
582
583         isr = in_be32(priv->reg + TALITOS_ISR);
584         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
585         /* Acknowledge interrupt */
586         out_be32(priv->reg + TALITOS_ICR, isr);
587         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
588
589         if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
590                 talitos_error((unsigned long)data, isr, isr_lo);
591         else
592                 if (likely(isr & TALITOS_ISR_CHDONE)) {
593                         /* mask further done interrupts. */
594                         clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
595                         /* done_task will unmask done interrupts at exit */
596                         tasklet_schedule(&priv->done_task);
597                 }
598
599         return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
600 }
601
602 /*
603  * hwrng
604  */
605 static int talitos_rng_data_present(struct hwrng *rng, int wait)
606 {
607         struct device *dev = (struct device *)rng->priv;
608         struct talitos_private *priv = dev_get_drvdata(dev);
609         u32 ofl;
610         int i;
611
612         for (i = 0; i < 20; i++) {
613                 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
614                       TALITOS_RNGUSR_LO_OFL;
615                 if (ofl || !wait)
616                         break;
617                 udelay(10);
618         }
619
620         return !!ofl;
621 }
622
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/*
	 * rng fifo requires 64-bit accesses: both halves of the entry are
	 * read, and only the low word is returned to the caller — the
	 * first read's value is deliberately overwritten.
	 */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}
634
635 static int talitos_rng_init(struct hwrng *rng)
636 {
637         struct device *dev = (struct device *)rng->priv;
638         struct talitos_private *priv = dev_get_drvdata(dev);
639         unsigned int timeout = TALITOS_TIMEOUT;
640
641         setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
642         while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
643                && --timeout)
644                 cpu_relax();
645         if (timeout == 0) {
646                 dev_err(dev, "failed to reset rng hw\n");
647                 return -ENODEV;
648         }
649
650         /* start generating */
651         setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
652
653         return 0;
654 }
655
656 static int talitos_register_rng(struct device *dev)
657 {
658         struct talitos_private *priv = dev_get_drvdata(dev);
659
660         priv->rng.name          = dev_driver_string(dev),
661         priv->rng.init          = talitos_rng_init,
662         priv->rng.data_present  = talitos_rng_data_present,
663         priv->rng.data_read     = talitos_rng_data_read,
664         priv->rng.priv          = (unsigned long)dev;
665
666         return hwrng_register(&priv->rng);
667 }
668
/* tear down the hwrng registered by talitos_register_rng() */
static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}
675
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		64
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

#define MD5_DIGEST_SIZE   16

/* per-transform context: combined authenc key material and lengths */
struct talitos_ctx {
	struct device *dev;
	__be32 desc_hdr_template;	/* base descriptor header; set up outside this chunk */
	u8 key[TALITOS_MAX_KEY_SIZE];	/* auth key followed by enc key (see aead_setkey) */
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;		/* total bytes of key[] in use */
	unsigned int enckeylen;		/* cipher key length */
	unsigned int authkeylen;	/* auth (hash) key length */
	unsigned int authsize;		/* ICV length (see aead_setauthsize) */
};
695
696 static int aead_setauthsize(struct crypto_aead *authenc,
697                             unsigned int authsize)
698 {
699         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
700
701         ctx->authsize = authsize;
702
703         return 0;
704 }
705
/*
 * parse an rtattr-encoded authenc key blob (param header followed by
 * authkey || enckey) and stash it in the tfm context
 */
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;

	/* validate the rtattr header: fits in keylen, right type, has param */
	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	/* skip the header; the remainder is the concatenated key material */
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	/* the combined key must fit the fixed-size context buffer */
	if (keylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(&ctx->key, key, keylen);

	ctx->keylen = keylen;
	ctx->enckeylen = enckeylen;
	ctx->authkeylen = authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
750
751 /*
752  * talitos_edesc - s/w-extended descriptor
753  * @src_nents: number of segments in input scatterlist
754  * @dst_nents: number of segments in output scatterlist
755  * @dma_len: length of dma mapped link_tbl space
756  * @dma_link_tbl: bus physical address of link_tbl
757  * @desc: h/w descriptor
758  * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
759  *
760  * if decrypting (with authcheck), or either one of src_nents or dst_nents
761  * is greater than 1, an integrity check value is concatenated to the end
762  * of link_tbl data
763  */
764 struct talitos_edesc {
765         int src_nents;
766         int dst_nents;
767         int src_is_chained;
768         int dst_is_chained;
769         int dma_len;
770         dma_addr_t dma_link_tbl;
771         struct talitos_desc desc;
772         struct talitos_ptr link_tbl[0];
773 };
774
775 static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
776                           unsigned int nents, enum dma_data_direction dir,
777                           int chained)
778 {
779         if (unlikely(chained))
780                 while (sg) {
781                         dma_map_sg(dev, sg, 1, dir);
782                         sg = scatterwalk_sg_next(sg);
783                 }
784         else
785                 dma_map_sg(dev, sg, nents, dir);
786         return nents;
787 }
788
789 static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
790                                    enum dma_data_direction dir)
791 {
792         while (sg) {
793                 dma_unmap_sg(dev, sg, 1, dir);
794                 sg = scatterwalk_sg_next(sg);
795         }
796 }
797
798 static void talitos_sg_unmap(struct device *dev,
799                              struct talitos_edesc *edesc,
800                              struct scatterlist *src,
801                              struct scatterlist *dst)
802 {
803         unsigned int src_nents = edesc->src_nents ? : 1;
804         unsigned int dst_nents = edesc->dst_nents ? : 1;
805
806         if (src != dst) {
807                 if (edesc->src_is_chained)
808                         talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
809                 else
810                         dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
811
812                 if (edesc->dst_is_chained)
813                         talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
814                 else
815                         dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
816         } else
817                 if (edesc->src_is_chained)
818                         talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
819                 else
820                         dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
821 }
822
/* undo all DMA mappings made when building an ipsec_esp descriptor */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	/*
	 * NOTE(review): which payload each ptr[] slot holds is established
	 * in ipsec_esp() (not visible in this chunk); the directions here
	 * must stay matched with the corresponding map calls.
	 */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	/* release the link table mapping, if one was created */
	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
840
841 /*
842  * ipsec_esp descriptor callbacks
843  */
/*
 * Completion callback for AEAD encryption: copy the h/w generated ICV
 * from the stash area after the link tables to the tail of the
 * destination scatterlist, then free the edesc and complete the request.
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* the descriptor is embedded in the edesc */
	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dma_len) {
		/* ICV data follows the src and dst link tables (+2 slots
		 * reserved for the ICV link-table entries themselves)
		 */
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
872
/*
 * Completion callback for AEAD decryption when the ICV is checked in
 * software: compare the h/w computed ICV against the copy stashed from
 * the incoming packet by aead_decrypt(), reporting -EBADMSG on mismatch.
 */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		/* stashed ICV lives after the link tables when one was
		 * built, else at the start of the link_tbl stash area
		 */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		/* NOTE(review): memcmp is not constant-time; a
		 * constant-time compare would be preferable for ICVs
		 */
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
905
906 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
907                                           struct talitos_desc *desc,
908                                           void *context, int err)
909 {
910         struct aead_request *req = context;
911         struct talitos_edesc *edesc;
912
913         edesc = container_of(desc, struct talitos_edesc, desc);
914
915         ipsec_esp_unmap(dev, edesc, req);
916
917         /* check ICV auth status */
918         if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
919                      DESC_HDR_LO_ICCR1_PASS))
920                 err = -EBADMSG;
921
922         kfree(edesc);
923
924         aead_request_complete(req, err);
925 }
926
927 /*
928  * convert scatterlist to SEC h/w link table format
929  * stop at cryptlen bytes
930  */
931 static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
932                            int cryptlen, struct talitos_ptr *link_tbl_ptr)
933 {
934         int n_sg = sg_count;
935
936         while (n_sg--) {
937                 link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
938                 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
939                 link_tbl_ptr->j_extent = 0;
940                 link_tbl_ptr++;
941                 cryptlen -= sg_dma_len(sg);
942                 sg = scatterwalk_sg_next(sg);
943         }
944
945         /* adjust (decrease) last one (or two) entry's len to cryptlen */
946         link_tbl_ptr--;
947         while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
948                 /* Empty this entry, and move to previous one */
949                 cryptlen += be16_to_cpu(link_tbl_ptr->len);
950                 link_tbl_ptr->len = 0;
951                 sg_count--;
952                 link_tbl_ptr--;
953         }
954         link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
955                                         + cryptlen);
956
957         /* tag end of link table */
958         link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
959
960         return sg_count;
961 }
962
963 /*
964  * fill in and submit ipsec_esp descriptor
965  */
966 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
967                      u8 *giv, u64 seq,
968                      void (*callback) (struct device *dev,
969                                        struct talitos_desc *desc,
970                                        void *context, int error))
971 {
972         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
973         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
974         struct device *dev = ctx->dev;
975         struct talitos_desc *desc = &edesc->desc;
976         unsigned int cryptlen = areq->cryptlen;
977         unsigned int authsize = ctx->authsize;
978         unsigned int ivsize = crypto_aead_ivsize(aead);
979         int sg_count, ret;
980         int sg_link_tbl_len;
981
982         /* hmac key */
983         map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
984                                0, DMA_TO_DEVICE);
985         /* hmac data */
986         map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
987                                sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
988         /* cipher iv */
989         map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
990                                DMA_TO_DEVICE);
991
992         /* cipher key */
993         map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
994                                (char *)&ctx->key + ctx->authkeylen, 0,
995                                DMA_TO_DEVICE);
996
997         /*
998          * cipher in
999          * map and adjust cipher len to aead request cryptlen.
1000          * extent is bytes of HMAC postpended to ciphertext,
1001          * typically 12 for ipsec
1002          */
1003         desc->ptr[4].len = cpu_to_be16(cryptlen);
1004         desc->ptr[4].j_extent = authsize;
1005
1006         sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1007                                   (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1008                                                            : DMA_TO_DEVICE,
1009                                   edesc->src_is_chained);
1010
1011         if (sg_count == 1) {
1012                 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
1013         } else {
1014                 sg_link_tbl_len = cryptlen;
1015
1016                 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1017                         sg_link_tbl_len = cryptlen + authsize;
1018
1019                 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
1020                                           &edesc->link_tbl[0]);
1021                 if (sg_count > 1) {
1022                         desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1023                         desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
1024                         dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1025                                                    edesc->dma_len,
1026                                                    DMA_BIDIRECTIONAL);
1027                 } else {
1028                         /* Only one segment now, so no link tbl needed */
1029                         desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->
1030                                                                       src));
1031                 }
1032         }
1033
1034         /* cipher out */
1035         desc->ptr[5].len = cpu_to_be16(cryptlen);
1036         desc->ptr[5].j_extent = authsize;
1037
1038         if (areq->src != areq->dst)
1039                 sg_count = talitos_map_sg(dev, areq->dst,
1040                                           edesc->dst_nents ? : 1,
1041                                           DMA_FROM_DEVICE,
1042                                           edesc->dst_is_chained);
1043
1044         if (sg_count == 1) {
1045                 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
1046         } else {
1047                 struct talitos_ptr *link_tbl_ptr =
1048                         &edesc->link_tbl[edesc->src_nents + 1];
1049
1050                 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
1051                                                edesc->dma_link_tbl +
1052                                                edesc->src_nents + 1);
1053                 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1054                                           link_tbl_ptr);
1055
1056                 /* Add an entry to the link table for ICV data */
1057                 link_tbl_ptr += sg_count - 1;
1058                 link_tbl_ptr->j_extent = 0;
1059                 sg_count++;
1060                 link_tbl_ptr++;
1061                 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1062                 link_tbl_ptr->len = cpu_to_be16(authsize);
1063
1064                 /* icv data follows link tables */
1065                 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
1066                                                 edesc->dma_link_tbl +
1067                                                 edesc->src_nents +
1068                                                 edesc->dst_nents + 2);
1069
1070                 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1071                 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1072                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1073         }
1074
1075         /* iv out */
1076         map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
1077                                DMA_FROM_DEVICE);
1078
1079         ret = talitos_submit(dev, desc, callback, areq);
1080         if (ret != -EINPROGRESS) {
1081                 ipsec_esp_unmap(dev, edesc, areq);
1082                 kfree(edesc);
1083         }
1084         return ret;
1085 }
1086
1087 /*
1088  * derive number of elements in scatterlist
1089  */
1090 static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
1091 {
1092         struct scatterlist *sg = sg_list;
1093         int sg_nents = 0;
1094
1095         *chained = 0;
1096         while (nbytes > 0) {
1097                 sg_nents++;
1098                 nbytes -= sg->length;
1099                 if (!sg_is_last(sg) && (sg + 1)->length == 0)
1100                         *chained = 1;
1101                 sg = scatterwalk_sg_next(sg);
1102         }
1103
1104         return sg_nents;
1105 }
1106
1107 /*
1108  * allocate and map the extended descriptor
1109  */
1110 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1111                                                  struct scatterlist *src,
1112                                                  struct scatterlist *dst,
1113                                                  unsigned int cryptlen,
1114                                                  unsigned int authsize,
1115                                                  int icv_stashing,
1116                                                  u32 cryptoflags)
1117 {
1118         struct talitos_edesc *edesc;
1119         int src_nents, dst_nents, alloc_len, dma_len;
1120         int src_chained, dst_chained = 0;
1121         gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1122                       GFP_ATOMIC;
1123
1124         if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1125                 dev_err(dev, "length exceeds h/w max limit\n");
1126                 return ERR_PTR(-EINVAL);
1127         }
1128
1129         src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1130         src_nents = (src_nents == 1) ? 0 : src_nents;
1131
1132         if (dst == src) {
1133                 dst_nents = src_nents;
1134         } else {
1135                 dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
1136                 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1137         }
1138
1139         /*
1140          * allocate space for base edesc plus the link tables,
1141          * allowing for two separate entries for ICV and generated ICV (+ 2),
1142          * and the ICV data itself
1143          */
1144         alloc_len = sizeof(struct talitos_edesc);
1145         if (src_nents || dst_nents) {
1146                 dma_len = (src_nents + dst_nents + 2) *
1147                                  sizeof(struct talitos_ptr) + authsize;
1148                 alloc_len += dma_len;
1149         } else {
1150                 dma_len = 0;
1151                 alloc_len += icv_stashing ? authsize : 0;
1152         }
1153
1154         edesc = kmalloc(alloc_len, GFP_DMA | flags);
1155         if (!edesc) {
1156                 dev_err(dev, "could not allocate edescriptor\n");
1157                 return ERR_PTR(-ENOMEM);
1158         }
1159
1160         edesc->src_nents = src_nents;
1161         edesc->dst_nents = dst_nents;
1162         edesc->src_is_chained = src_chained;
1163         edesc->dst_is_chained = dst_chained;
1164         edesc->dma_len = dma_len;
1165         edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1166                                              edesc->dma_len, DMA_BIDIRECTIONAL);
1167
1168         return edesc;
1169 }
1170
1171 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
1172                                               int icv_stashing)
1173 {
1174         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1175         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1176
1177         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1178                                    areq->cryptlen, ctx->authsize, icv_stashing,
1179                                    areq->base.flags);
1180 }
1181
1182 static int aead_encrypt(struct aead_request *req)
1183 {
1184         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1185         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1186         struct talitos_edesc *edesc;
1187
1188         /* allocate extended descriptor */
1189         edesc = aead_edesc_alloc(req, 0);
1190         if (IS_ERR(edesc))
1191                 return PTR_ERR(edesc);
1192
1193         /* set encrypt */
1194         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1195
1196         return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1197 }
1198
/*
 * decrypt entry point for the AEAD algorithms
 *
 * Uses the SEC's hardware ICV check when the channel supports it and
 * the request shape allows; otherwise stashes the incoming ICV and
 * compares in software from the completion callback.
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* the submitted cryptlen includes the ICV; the cipher operates
	 * on the payload only (NOTE: this mutates the request)
	 */
	req->cryptlen -= authsize;

	/* allocate extended descriptor (icv_stashing=1 reserves room) */
	edesc = aead_edesc_alloc(req, 1);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, NULL, 0,
				 ipsec_esp_decrypt_hwauth_done);

	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	/* the ICV sits at the very end of the source data */
	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
	       ctx->authsize);

	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
}
1250
1251 static int aead_givencrypt(struct aead_givcrypt_request *req)
1252 {
1253         struct aead_request *areq = &req->areq;
1254         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1255         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1256         struct talitos_edesc *edesc;
1257
1258         /* allocate extended descriptor */
1259         edesc = aead_edesc_alloc(areq, 0);
1260         if (IS_ERR(edesc))
1261                 return PTR_ERR(edesc);
1262
1263         /* set encrypt */
1264         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1265
1266         memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1267         /* avoid consecutive packets going out with same IV */
1268         *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1269
1270         return ipsec_esp(edesc, areq, req->giv, req->seq,
1271                          ipsec_esp_encrypt_done);
1272 }
1273
1274 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1275                              const u8 *key, unsigned int keylen)
1276 {
1277         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1278         struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
1279
1280         if (keylen > TALITOS_MAX_KEY_SIZE)
1281                 goto badkey;
1282
1283         if (keylen < alg->min_keysize || keylen > alg->max_keysize)
1284                 goto badkey;
1285
1286         memcpy(&ctx->key, key, keylen);
1287         ctx->keylen = keylen;
1288
1289         return 0;
1290
1291 badkey:
1292         crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1293         return -EINVAL;
1294 }
1295
/*
 * Undo all DMA mappings set up by common_nonsnoop() for an ablkcipher
 * request: the single-buffer pointers, the src/dst scatterlists and,
 * if one was built, the link table.
 */
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	/* ptr[5]=IV out, ptr[2]=cipher key, ptr[1]=IV in */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	/* the link table is mapped only when dma_len != 0 */
	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
1310
1311 static void ablkcipher_done(struct device *dev,
1312                             struct talitos_desc *desc, void *context,
1313                             int err)
1314 {
1315         struct ablkcipher_request *areq = context;
1316         struct talitos_edesc *edesc;
1317
1318         edesc = container_of(desc, struct talitos_edesc, desc);
1319
1320         common_nonsnoop_unmap(dev, edesc, areq);
1321
1322         kfree(edesc);
1323
1324         areq->base.complete(&areq->base, err);
1325 }
1326
/*
 * Fill in and submit a non-snooping (plain cipher) descriptor for an
 * ablkcipher request.  Maps IV, key and cipher in/out pointers,
 * building link tables for multi-segment scatterlists.  On submission
 * failure the mappings are torn down and the edesc freed here;
 * otherwise @callback owns the cleanup.
 */
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   u8 *giv,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize;
	int sg_count, ret;

	/* first DWORD empty */
	desc->ptr[0].len = 0;
	desc->ptr[0].ptr = 0;
	desc->ptr[0].j_extent = 0;

	/* cipher iv (driver-generated IV takes precedence when set) */
	ivsize = crypto_ablkcipher_ivsize(cipher);
	map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
			       DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, 0, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	desc->ptr[3].len = cpu_to_be16(cryptlen);
	desc->ptr[3].j_extent = 0;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_is_chained);

	if (sg_count == 1) {
		/* single segment: point the descriptor at it directly */
		desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
	} else {
		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
			desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
			/* flush the CPU-built link table to the device */
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->
								      src));
		}
	}

	/* cipher out */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = 0;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE,
					  edesc->dst_is_chained);

	if (sg_count == 1) {
		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
	} else {
		/* dst link table starts right after the src one (+1 for
		 * its terminating entry)
		 */
		struct talitos_ptr *link_tbl_ptr =
			&edesc->link_tbl[edesc->src_nents + 1];

		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
					       edesc->dma_link_tbl +
					       edesc->src_nents + 1);
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  link_tbl_ptr);
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6].len = 0;
	desc->ptr[6].ptr = 0;
	desc->ptr[6].j_extent = 0;

	ret = talitos_submit(dev, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* submission failed: the callback will never run, so
		 * unwind the mappings and free the edesc here
		 */
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1427
1428 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1429                                                     areq)
1430 {
1431         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1432         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1433
1434         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
1435                                    0, 0, areq->base.flags);
1436 }
1437
1438 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1439 {
1440         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1441         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1442         struct talitos_edesc *edesc;
1443
1444         /* allocate extended descriptor */
1445         edesc = ablkcipher_edesc_alloc(areq);
1446         if (IS_ERR(edesc))
1447                 return PTR_ERR(edesc);
1448
1449         /* set encrypt */
1450         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1451
1452         return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
1453 }
1454
1455 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1456 {
1457         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1458         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1459         struct talitos_edesc *edesc;
1460
1461         /* allocate extended descriptor */
1462         edesc = ablkcipher_edesc_alloc(areq);
1463         if (IS_ERR(edesc))
1464                 return PTR_ERR(edesc);
1465
1466         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1467
1468         return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
1469 }
1470
/* pairs a crypto API algorithm definition with the SEC descriptor
 * header template used to drive it
 */
struct talitos_alg_template {
	struct crypto_alg alg;		/* crypto API registration data */
	__be32 desc_hdr_template;	/* base descriptor header bits */
};
1475
1476 static struct talitos_alg_template driver_algs[] = {
1477         /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
1478         {
1479                 .alg = {
1480                         .cra_name = "authenc(hmac(sha1),cbc(aes))",
1481                         .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1482                         .cra_blocksize = AES_BLOCK_SIZE,
1483                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1484                         .cra_type = &crypto_aead_type,
1485                         .cra_aead = {
1486                                 .setkey = aead_setkey,
1487                                 .setauthsize = aead_setauthsize,
1488                                 .encrypt = aead_encrypt,
1489                                 .decrypt = aead_decrypt,
1490                                 .givencrypt = aead_givencrypt,
1491                                 .geniv = "<built-in>",
1492                                 .ivsize = AES_BLOCK_SIZE,
1493                                 .maxauthsize = SHA1_DIGEST_SIZE,
1494                         }
1495                 },
1496                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1497                                      DESC_HDR_SEL0_AESU |
1498                                      DESC_HDR_MODE0_AESU_CBC |
1499                                      DESC_HDR_SEL1_MDEUA |
1500                                      DESC_HDR_MODE1_MDEU_INIT |
1501                                      DESC_HDR_MODE1_MDEU_PAD |
1502                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1503         },
1504         {
1505                 .alg = {
1506                         .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1507                         .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1508                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1509                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1510                         .cra_type = &crypto_aead_type,
1511                         .cra_aead = {
1512                                 .setkey = aead_setkey,
1513                                 .setauthsize = aead_setauthsize,
1514                                 .encrypt = aead_encrypt,
1515                                 .decrypt = aead_decrypt,
1516                                 .givencrypt = aead_givencrypt,
1517                                 .geniv = "<built-in>",
1518                                 .ivsize = DES3_EDE_BLOCK_SIZE,
1519                                 .maxauthsize = SHA1_DIGEST_SIZE,
1520                         }
1521                 },
1522                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1523                                      DESC_HDR_SEL0_DEU |
1524                                      DESC_HDR_MODE0_DEU_CBC |
1525                                      DESC_HDR_MODE0_DEU_3DES |
1526                                      DESC_HDR_SEL1_MDEUA |
1527                                      DESC_HDR_MODE1_MDEU_INIT |
1528                                      DESC_HDR_MODE1_MDEU_PAD |
1529                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1530         },
1531         {
1532                 .alg = {
1533                         .cra_name = "authenc(hmac(sha256),cbc(aes))",
1534                         .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
1535                         .cra_blocksize = AES_BLOCK_SIZE,
1536                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1537                         .cra_type = &crypto_aead_type,
1538                         .cra_aead = {
1539                                 .setkey = aead_setkey,
1540                                 .setauthsize = aead_setauthsize,
1541                                 .encrypt = aead_encrypt,
1542                                 .decrypt = aead_decrypt,
1543                                 .givencrypt = aead_givencrypt,
1544                                 .geniv = "<built-in>",
1545                                 .ivsize = AES_BLOCK_SIZE,
1546                                 .maxauthsize = SHA256_DIGEST_SIZE,
1547                         }
1548                 },
1549                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1550                                      DESC_HDR_SEL0_AESU |
1551                                      DESC_HDR_MODE0_AESU_CBC |
1552                                      DESC_HDR_SEL1_MDEUA |
1553                                      DESC_HDR_MODE1_MDEU_INIT |
1554                                      DESC_HDR_MODE1_MDEU_PAD |
1555                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1556         },
1557         {
1558                 .alg = {
1559                         .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
1560                         .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
1561                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1562                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1563                         .cra_type = &crypto_aead_type,
1564                         .cra_aead = {
1565                                 .setkey = aead_setkey,
1566                                 .setauthsize = aead_setauthsize,
1567                                 .encrypt = aead_encrypt,
1568                                 .decrypt = aead_decrypt,
1569                                 .givencrypt = aead_givencrypt,
1570                                 .geniv = "<built-in>",
1571                                 .ivsize = DES3_EDE_BLOCK_SIZE,
1572                                 .maxauthsize = SHA256_DIGEST_SIZE,
1573                         }
1574                 },
1575                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1576                                      DESC_HDR_SEL0_DEU |
1577                                      DESC_HDR_MODE0_DEU_CBC |
1578                                      DESC_HDR_MODE0_DEU_3DES |
1579                                      DESC_HDR_SEL1_MDEUA |
1580                                      DESC_HDR_MODE1_MDEU_INIT |
1581                                      DESC_HDR_MODE1_MDEU_PAD |
1582                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1583         },
1584         {
1585                 .alg = {
1586                         .cra_name = "authenc(hmac(md5),cbc(aes))",
1587                         .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
1588                         .cra_blocksize = AES_BLOCK_SIZE,
1589                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1590                         .cra_type = &crypto_aead_type,
1591                         .cra_aead = {
1592                                 .setkey = aead_setkey,
1593                                 .setauthsize = aead_setauthsize,
1594                                 .encrypt = aead_encrypt,
1595                                 .decrypt = aead_decrypt,
1596                                 .givencrypt = aead_givencrypt,
1597                                 .geniv = "<built-in>",
1598                                 .ivsize = AES_BLOCK_SIZE,
1599                                 .maxauthsize = MD5_DIGEST_SIZE,
1600                         }
1601                 },
1602                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1603                                      DESC_HDR_SEL0_AESU |
1604                                      DESC_HDR_MODE0_AESU_CBC |
1605                                      DESC_HDR_SEL1_MDEUA |
1606                                      DESC_HDR_MODE1_MDEU_INIT |
1607                                      DESC_HDR_MODE1_MDEU_PAD |
1608                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
1609         },
1610         {
1611                 .alg = {
1612                         .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1613                         .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
1614                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1615                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1616                         .cra_type = &crypto_aead_type,
1617                         .cra_aead = {
1618                                 .setkey = aead_setkey,
1619                                 .setauthsize = aead_setauthsize,
1620                                 .encrypt = aead_encrypt,
1621                                 .decrypt = aead_decrypt,
1622                                 .givencrypt = aead_givencrypt,
1623                                 .geniv = "<built-in>",
1624                                 .ivsize = DES3_EDE_BLOCK_SIZE,
1625                                 .maxauthsize = MD5_DIGEST_SIZE,
1626                         }
1627                 },
1628                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1629                                      DESC_HDR_SEL0_DEU |
1630                                      DESC_HDR_MODE0_DEU_CBC |
1631                                      DESC_HDR_MODE0_DEU_3DES |
1632                                      DESC_HDR_SEL1_MDEUA |
1633                                      DESC_HDR_MODE1_MDEU_INIT |
1634                                      DESC_HDR_MODE1_MDEU_PAD |
1635                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
1636         },
1637         /* ABLKCIPHER algorithms. */
1638         {
1639                 .alg = {
1640                         .cra_name = "cbc(aes)",
1641                         .cra_driver_name = "cbc-aes-talitos",
1642                         .cra_blocksize = AES_BLOCK_SIZE,
1643                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1644                                      CRYPTO_ALG_ASYNC,
1645                         .cra_type = &crypto_ablkcipher_type,
1646                         .cra_ablkcipher = {
1647                                 .setkey = ablkcipher_setkey,
1648                                 .encrypt = ablkcipher_encrypt,
1649                                 .decrypt = ablkcipher_decrypt,
1650                                 .geniv = "eseqiv",
1651                                 .min_keysize = AES_MIN_KEY_SIZE,
1652                                 .max_keysize = AES_MAX_KEY_SIZE,
1653                                 .ivsize = AES_BLOCK_SIZE,
1654                         }
1655                 },
1656                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
1657                                      DESC_HDR_SEL0_AESU |
1658                                      DESC_HDR_MODE0_AESU_CBC,
1659         },
1660         {
1661                 .alg = {
1662                         .cra_name = "cbc(des3_ede)",
1663                         .cra_driver_name = "cbc-3des-talitos",
1664                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1665                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1666                                      CRYPTO_ALG_ASYNC,
1667                         .cra_type = &crypto_ablkcipher_type,
1668                         .cra_ablkcipher = {
1669                                 .setkey = ablkcipher_setkey,
1670                                 .encrypt = ablkcipher_encrypt,
1671                                 .decrypt = ablkcipher_decrypt,
1672                                 .geniv = "eseqiv",
1673                                 .min_keysize = DES3_EDE_KEY_SIZE,
1674                                 .max_keysize = DES3_EDE_KEY_SIZE,
1675                                 .ivsize = DES3_EDE_BLOCK_SIZE,
1676                         }
1677                 },
1678                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
1679                                      DESC_HDR_SEL0_DEU |
1680                                      DESC_HDR_MODE0_DEU_CBC |
1681                                      DESC_HDR_MODE0_DEU_3DES,
1682         }
1683 };
1684
/* per-algorithm registration state: ties a registered crypto_alg to its device */
struct talitos_crypto_alg {
	struct list_head entry;		/* node in talitos_private.alg_list */
	struct device *dev;		/* SEC device that services this alg */
	__be32 desc_hdr_template;	/* descriptor header template for this alg */
	struct crypto_alg crypto_alg;	/* the alg as registered with the crypto API */
};
1691
1692 static int talitos_cra_init(struct crypto_tfm *tfm)
1693 {
1694         struct crypto_alg *alg = tfm->__crt_alg;
1695         struct talitos_crypto_alg *talitos_alg;
1696         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
1697
1698         talitos_alg =  container_of(alg, struct talitos_crypto_alg, crypto_alg);
1699
1700         /* update context with ptr to dev */
1701         ctx->dev = talitos_alg->dev;
1702
1703         /* copy descriptor header template value */
1704         ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
1705
1706         /* random first IV */
1707         get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
1708
1709         return 0;
1710 }
1711
1712 /*
1713  * given the alg's descriptor header template, determine whether descriptor
1714  * type and primary/secondary execution units required match the hw
1715  * capabilities description provided in the device tree node.
1716  */
1717 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
1718 {
1719         struct talitos_private *priv = dev_get_drvdata(dev);
1720         int ret;
1721
1722         ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
1723               (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
1724
1725         if (SECONDARY_EU(desc_hdr_template))
1726                 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
1727                               & priv->exec_units);
1728
1729         return ret;
1730 }
1731
1732 static int talitos_remove(struct of_device *ofdev)
1733 {
1734         struct device *dev = &ofdev->dev;
1735         struct talitos_private *priv = dev_get_drvdata(dev);
1736         struct talitos_crypto_alg *t_alg, *n;
1737         int i;
1738
1739         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
1740                 crypto_unregister_alg(&t_alg->crypto_alg);
1741                 list_del(&t_alg->entry);
1742                 kfree(t_alg);
1743         }
1744
1745         if (hw_supports(dev, DESC_HDR_SEL0_RNG))
1746                 talitos_unregister_rng(dev);
1747
1748         for (i = 0; i < priv->num_channels; i++)
1749                 if (priv->chan[i].fifo)
1750                         kfree(priv->chan[i].fifo);
1751
1752         kfree(priv->chan);
1753
1754         if (priv->irq != NO_IRQ) {
1755                 free_irq(priv->irq, dev);
1756                 irq_dispose_mapping(priv->irq);
1757         }
1758
1759         tasklet_kill(&priv->done_task);
1760
1761         iounmap(priv->reg);
1762
1763         dev_set_drvdata(dev, NULL);
1764
1765         kfree(priv);
1766
1767         return 0;
1768 }
1769
1770 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1771                                                     struct talitos_alg_template
1772                                                            *template)
1773 {
1774         struct talitos_crypto_alg *t_alg;
1775         struct crypto_alg *alg;
1776
1777         t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
1778         if (!t_alg)
1779                 return ERR_PTR(-ENOMEM);
1780
1781         alg = &t_alg->crypto_alg;
1782         *alg = template->alg;
1783
1784         alg->cra_module = THIS_MODULE;
1785         alg->cra_init = talitos_cra_init;
1786         alg->cra_priority = TALITOS_CRA_PRIORITY;
1787         alg->cra_alignmask = 0;
1788         alg->cra_ctxsize = sizeof(struct talitos_ctx);
1789
1790         t_alg->desc_hdr_template = template->desc_hdr_template;
1791         t_alg->dev = dev;
1792
1793         return t_alg;
1794 }
1795
/*
 * Probe a SEC device node: map the interrupt and registers, read the
 * hardware capability description from the device tree, allocate
 * per-channel state and request fifos, reset/initialize the hardware,
 * then register the RNG (if present) and every supported algorithm.
 *
 * On any failure, jumps to err_out, which calls talitos_remove() to
 * unwind whatever was set up so far.
 */
static int talitos_probe(struct of_device *ofdev,
			 const struct of_device_id *match)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* set drvdata early: talitos_remove() relies on it for unwinding */
	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);

	INIT_LIST_HEAD(&priv->alg_list);

	priv->irq = irq_of_parse_and_map(np, 0);

	if (priv->irq == NO_IRQ) {
		dev_err(dev, "failed to map irq\n");
		err = -EINVAL;
		goto err_out;
	}

	/* get the irq line */
	err = request_irq(priv->irq, talitos_interrupt, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request irq %d\n", priv->irq);
		irq_dispose_mapping(priv->irq);
		/* mark irq unset so talitos_remove() won't free it again */
		priv->irq = NO_IRQ;
		goto err_out;
	}

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	/* reject missing or malformed capability properties */
	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	/* per-SEC-version feature flags */
	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK;

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < priv->num_channels; i++) {
		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);
	}

	/* fifo length must be a power of 2 for head/tail index masking */
	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}
	}

	/* start counters below zero so submit can test with atomic_inc */
	for (i = 0; i < priv->num_channels; i++)
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				goto err_out;
			}

			/* one failed registration doesn't abort the rest */
			err = crypto_register_alg(&t_alg->crypto_alg);
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					t_alg->crypto_alg.cra_driver_name);
				kfree(t_alg);
			} else {
				list_add_tail(&t_alg->entry, &priv->alg_list);
				dev_info(dev, "%s\n",
					 t_alg->crypto_alg.cra_driver_name);
			}
		}
	}

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
1949
/* device tree "compatible" strings this driver binds to */
static struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
1956 MODULE_DEVICE_TABLE(of, talitos_match);
1957
/* of_platform driver glue: binds probe/remove to matching device nodes */
static struct of_platform_driver talitos_driver = {
	.name = "talitos",
	.match_table = talitos_match,
	.probe = talitos_probe,
	.remove = talitos_remove,
};
1964
/* module entry point: register the of_platform driver */
static int __init talitos_init(void)
{
	return of_register_platform_driver(&talitos_driver);
}
module_init(talitos_init);
1970
/* module exit point: unregister the of_platform driver */
static void __exit talitos_exit(void)
{
	of_unregister_platform_driver(&talitos_driver);
}
module_exit(talitos_exit);
1976
1977 MODULE_LICENSE("GPL");
1978 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
1979 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");