include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 79fdba2..dc558a0 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -37,6 +37,7 @@
 #include <linux/io.h>
 #include <linux/spinlock.h>
 #include <linux/rtnetlink.h>
+#include <linux/slab.h>
 
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
@@ -44,6 +45,8 @@
 #include <crypto/sha.h>
 #include <crypto/aead.h>
 #include <crypto/authenc.h>
+#include <crypto/skcipher.h>
+#include <crypto/scatterwalk.h>
 
 #include "talitos.h"
 
@@ -84,6 +87,25 @@ struct talitos_request {
        void *context;
 };
 
+/* per-channel fifo management */
+struct talitos_channel {
+       /* request fifo */
+       struct talitos_request *fifo;
+
+       /* number of requests pending in channel h/w fifo */
+       atomic_t submit_count ____cacheline_aligned;
+
+       /* request submission (head) lock */
+       spinlock_t head_lock ____cacheline_aligned;
+       /* index to next free descriptor request */
+       int head;
+
+       /* request release (tail) lock */
+       spinlock_t tail_lock ____cacheline_aligned;
+       /* index to next in-progress/done descriptor request */
+       int tail;
+};
+
 struct talitos_private {
        struct device *dev;
        struct of_device *ofdev;
@@ -96,14 +118,8 @@ struct talitos_private {
        unsigned int exec_units;
        unsigned int desc_types;
 
-       /* next channel to be assigned next incoming descriptor */
-       atomic_t last_chan;
-
-       /* per-channel number of requests pending in channel h/w fifo */
-       atomic_t *submit_count;
-
-       /* per-channel request fifo */
-       struct talitos_request **fifo;
+       /* SEC Compatibility info */
+       unsigned long features;
 
        /*
         * length of the request fifo
@@ -112,19 +128,13 @@ struct talitos_private {
         */
        unsigned int fifo_len;
 
-       /* per-channel index to next free descriptor request */
-       int *head;
-
-       /* per-channel index to next in-progress/done descriptor request */
-       int *tail;
+       struct talitos_channel *chan;
 
-       /* per-channel request submission (head) and release (tail) locks */
-       spinlock_t *head_lock;
-       spinlock_t *tail_lock;
+       /* next channel to be assigned next incoming descriptor */
+       atomic_t last_chan ____cacheline_aligned;
 
        /* request callback tasklet */
        struct tasklet_struct done_task;
-       struct tasklet_struct error_task;
 
        /* list of registered algorithms */
        struct list_head alg_list;
@@ -133,6 +143,16 @@ struct talitos_private {
        struct hwrng rng;
 };
 
+/* .features flag */
+#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
+#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+
+static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
+{
+       talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
+       talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
+}
+
 /*
  * map virtual single (contiguous) pointer to h/w descriptor pointer
  */
@@ -142,8 +162,10 @@ static void map_single_talitos_ptr(struct device *dev,
                                   unsigned char extent,
                                   enum dma_data_direction dir)
 {
+       dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
+
        talitos_ptr->len = cpu_to_be16(len);
-       talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
+       to_talitos_ptr(talitos_ptr, dma_addr);
        talitos_ptr->j_extent = extent;
 }
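
The new to_talitos_ptr() helper above is what the 36-bit support hinges on: the low 32 bits of a DMA address go into ptr and the remaining high bits into the extended-pointer field, both byte-swapped for the big-endian SEC. A minimal user-space model of that split (stand-in struct and an arbitrary 36-bit address, not the driver's types, and with the cpu_to_be32() swap left out):

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for struct talitos_ptr: eptr carries DMA address bits above 31 */
	struct hwptr {
		uint32_t eptr;
		uint32_t ptr;
	};

	static void set_hwptr(struct hwptr *p, uint64_t dma_addr)
	{
		p->ptr  = (uint32_t)dma_addr;          /* lower_32_bits(dma_addr) */
		p->eptr = (uint32_t)(dma_addr >> 32);  /* upper_32_bits(dma_addr) */
	}

	int main(void)
	{
		struct hwptr p;

		set_hwptr(&p, 0x980001000ULL);  /* a 36-bit address: 0x9_8000_1000 */
		printf("eptr=0x%x ptr=0x%x\n",
		       (unsigned int)p.eptr, (unsigned int)p.ptr);
		return 0;                       /* prints eptr=0x9 ptr=0x80001000 */
	}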
 
@@ -174,9 +196,14 @@ static int reset_channel(struct device *dev, int ch)
                return -EIO;
        }
 
-       /* set done writeback and IRQ */
-       setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
-                 TALITOS_CCCR_LO_CDIE);
+       /* set 36-bit addressing, done writeback enable and done IRQ enable */
+       setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
+                 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
+
+       /* and ICCR writeback, if available */
+       if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
+               setbits32(priv->reg + TALITOS_CCCR_LO(ch),
+                         TALITOS_CCCR_LO_IWSE);
 
        return 0;
 }
@@ -233,6 +260,11 @@ static int init_device(struct device *dev)
        setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
        setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
 
+       /* disable integrity check error interrupts (use writeback instead) */
+       if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
+               setbits32(priv->reg + TALITOS_MDEUICR_LO,
+                         TALITOS_MDEUICR_LO_ICE);
+
        return 0;
 }
 
@@ -264,16 +296,16 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
        /* emulate SEC's round-robin channel fifo polling scheme */
        ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
 
-       spin_lock_irqsave(&priv->head_lock[ch], flags);
+       spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
 
-       if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
+       if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
                /* h/w fifo is full */
-               spin_unlock_irqrestore(&priv->head_lock[ch], flags);
+               spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
                return -EAGAIN;
        }
 
-       head = priv->head[ch];
-       request = &priv->fifo[ch][head];
+       head = priv->chan[ch].head;
+       request = &priv->chan[ch].fifo[head];
 
        /* map descriptor and save caller data */
        request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
@@ -282,16 +314,19 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
        request->context = context;
 
        /* increment fifo head */
-       priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);
+       priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
 
        smp_wmb();
        request->desc = desc;
 
        /* GO! */
        wmb();
-       out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);
+       out_be32(priv->reg + TALITOS_FF(ch),
+                cpu_to_be32(upper_32_bits(request->dma_desc)));
+       out_be32(priv->reg + TALITOS_FF_LO(ch),
+                cpu_to_be32(lower_32_bits(request->dma_desc)));
 
-       spin_unlock_irqrestore(&priv->head_lock[ch], flags);
+       spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 
        return -EINPROGRESS;
 }
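
Channel selection and fifo indexing in talitos_submit() wrap purely by increment-and-mask because both num_channels and fifo_len are powers of two. A small user-space model of that scheme (sizes and types here are placeholders; the driver's atomic_t and per-channel locking are omitted):

	#include <stdio.h>

	#define NUM_CHANNELS 4u   /* power of two, like priv->num_channels */
	#define FIFO_LEN     8u   /* power of two, like priv->fifo_len */

	static unsigned int last_chan;            /* atomic_t in the driver */
	static unsigned int head[NUM_CHANNELS];   /* priv->chan[ch].head */

	/* emulate the SEC's round-robin channel fifo polling scheme */
	static unsigned int pick_channel(void)
	{
		return ++last_chan & (NUM_CHANNELS - 1);
	}

	/* advance a channel's request fifo head, wrapping at FIFO_LEN */
	static unsigned int next_slot(unsigned int ch)
	{
		unsigned int slot = head[ch];

		head[ch] = (head[ch] + 1) & (FIFO_LEN - 1);
		return slot;
	}

	int main(void)
	{
		for (int i = 0; i < 10; i++) {
			unsigned int ch = pick_channel();
			printf("request %d -> channel %u, slot %u\n",
			       i, ch, next_slot(ch));
		}
		return 0;
	}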
@@ -306,11 +341,11 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
        unsigned long flags;
        int tail, status;
 
-       spin_lock_irqsave(&priv->tail_lock[ch], flags);
+       spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 
-       tail = priv->tail[ch];
-       while (priv->fifo[ch][tail].desc) {
-               request = &priv->fifo[ch][tail];
+       tail = priv->chan[ch].tail;
+       while (priv->chan[ch].fifo[tail].desc) {
+               request = &priv->chan[ch].fifo[tail];
 
                /* descriptors with their done bits set don't get the error */
                rmb();
@@ -323,7 +358,8 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
                                status = error;
 
                dma_unmap_single(dev, request->dma_desc,
-                       sizeof(struct talitos_desc), DMA_BIDIRECTIONAL);
+                                sizeof(struct talitos_desc),
+                                DMA_BIDIRECTIONAL);
 
                /* copy entries so we can call callback outside lock */
                saved_req.desc = request->desc;
@@ -335,22 +371,22 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
                request->desc = NULL;
 
                /* increment fifo tail */
-               priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
+               priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
 
-               spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+               spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 
-               atomic_dec(&priv->submit_count[ch]);
+               atomic_dec(&priv->chan[ch].submit_count);
 
                saved_req.callback(dev, saved_req.desc, saved_req.context,
                                   status);
                /* channel may resume processing in single desc error case */
                if (error && !reset_ch && status == error)
                        return;
-               spin_lock_irqsave(&priv->tail_lock[ch], flags);
-               tail = priv->tail[ch];
+               spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
+               tail = priv->chan[ch].tail;
        }
 
-       spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+       spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 }
 
 /*
@@ -364,6 +400,12 @@ static void talitos_done(unsigned long data)
 
        for (ch = 0; ch < priv->num_channels; ch++)
                flush_channel(dev, ch, 0, 0);
+
+       /* At this point, all completed channels have been processed.
+        * Unmask done interrupts for channels completed later on.
+        */
+       setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
+       setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
 }
 
 /*
@@ -372,26 +414,27 @@ static void talitos_done(unsigned long data)
 static struct talitos_desc *current_desc(struct device *dev, int ch)
 {
        struct talitos_private *priv = dev_get_drvdata(dev);
-       int tail = priv->tail[ch];
+       int tail = priv->chan[ch].tail;
        dma_addr_t cur_desc;
 
        cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
 
-       while (priv->fifo[ch][tail].dma_desc != cur_desc) {
+       while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
                tail = (tail + 1) & (priv->fifo_len - 1);
-               if (tail == priv->tail[ch]) {
+               if (tail == priv->chan[ch].tail) {
                        dev_err(dev, "couldn't locate current descriptor\n");
                        return NULL;
                }
        }
 
-       return priv->fifo[ch][tail].desc;
+       return priv->chan[ch].fifo[tail].desc;
 }
 
 /*
  * user diagnostics; report root cause of error based on execution unit status
  */
-static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
+static void report_eu_error(struct device *dev, int ch,
+                           struct talitos_desc *desc)
 {
        struct talitos_private *priv = dev_get_drvdata(dev);
        int i;
@@ -463,16 +506,13 @@ static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
 /*
  * recover from error interrupts
  */
-static void talitos_error(unsigned long data)
+static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
 {
        struct device *dev = (struct device *)data;
        struct talitos_private *priv = dev_get_drvdata(dev);
        unsigned int timeout = TALITOS_TIMEOUT;
        int ch, error, reset_dev = 0, reset_ch = 0;
-       u32 isr, isr_lo, v, v_lo;
-
-       isr = in_be32(priv->reg + TALITOS_ISR);
-       isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
+       u32 v, v_lo;
 
        for (ch = 0; ch < priv->num_channels; ch++) {
                /* skip channels without errors */
@@ -554,16 +594,19 @@ static irqreturn_t talitos_interrupt(int irq, void *data)
 
        isr = in_be32(priv->reg + TALITOS_ISR);
        isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
-
-       /* ack */
+       /* Acknowledge interrupt */
        out_be32(priv->reg + TALITOS_ICR, isr);
        out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
 
        if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
-               talitos_error((unsigned long)data);
+               talitos_error((unsigned long)data, isr, isr_lo);
        else
-               if (likely(isr & TALITOS_ISR_CHDONE))
+               if (likely(isr & TALITOS_ISR_CHDONE)) {
+                       /* mask further done interrupts. */
+                       clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
+                       /* done_task will unmask done interrupts at exit */
                        tasklet_schedule(&priv->done_task);
+               }
 
        return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
 }
@@ -662,8 +705,8 @@ struct talitos_ctx {
        unsigned int authsize;
 };
 
-static int aead_authenc_setauthsize(struct crypto_aead *authenc,
-                                                unsigned int authsize)
+static int aead_setauthsize(struct crypto_aead *authenc,
+                           unsigned int authsize)
 {
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 
@@ -672,8 +715,8 @@ static int aead_authenc_setauthsize(struct crypto_aead *authenc,
        return 0;
 }
 
-static int aead_authenc_setkey(struct crypto_aead *authenc,
-                                           const u8 *key, unsigned int keylen)
+static int aead_setkey(struct crypto_aead *authenc,
+                      const u8 *key, unsigned int keylen)
 {
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        struct rtattr *rta = (void *)key;
@@ -718,7 +761,7 @@ badkey:
 }
 
 /*
- * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
+ * talitos_edesc - s/w-extended descriptor
  * @src_nents: number of segments in input scatterlist
  * @dst_nents: number of segments in output scatterlist
  * @dma_len: length of dma mapped link_tbl space
@@ -730,17 +773,67 @@ badkey:
  * is greater than 1, an integrity check value is concatenated to the end
  * of link_tbl data
  */
-struct ipsec_esp_edesc {
+struct talitos_edesc {
        int src_nents;
        int dst_nents;
+       int src_is_chained;
+       int dst_is_chained;
        int dma_len;
        dma_addr_t dma_link_tbl;
        struct talitos_desc desc;
        struct talitos_ptr link_tbl[0];
 };
 
+static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
+                         unsigned int nents, enum dma_data_direction dir,
+                         int chained)
+{
+       if (unlikely(chained))
+               while (sg) {
+                       dma_map_sg(dev, sg, 1, dir);
+                       sg = scatterwalk_sg_next(sg);
+               }
+       else
+               dma_map_sg(dev, sg, nents, dir);
+       return nents;
+}
+
+static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
+                                  enum dma_data_direction dir)
+{
+       while (sg) {
+               dma_unmap_sg(dev, sg, 1, dir);
+               sg = scatterwalk_sg_next(sg);
+       }
+}
+
+static void talitos_sg_unmap(struct device *dev,
+                            struct talitos_edesc *edesc,
+                            struct scatterlist *src,
+                            struct scatterlist *dst)
+{
+       unsigned int src_nents = edesc->src_nents ? : 1;
+       unsigned int dst_nents = edesc->dst_nents ? : 1;
+
+       if (src != dst) {
+               if (edesc->src_is_chained)
+                       talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
+               else
+                       dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+
+               if (edesc->dst_is_chained)
+                       talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
+               else
+                       dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+       } else
+               if (edesc->src_is_chained)
+                       talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
+               else
+                       dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+}
+
 static void ipsec_esp_unmap(struct device *dev,
-                           struct ipsec_esp_edesc *edesc,
+                           struct talitos_edesc *edesc,
                            struct aead_request *areq)
 {
        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
@@ -750,15 +843,7 @@ static void ipsec_esp_unmap(struct device *dev,
 
        dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
 
-       if (areq->src != areq->dst) {
-               dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
-                            DMA_TO_DEVICE);
-               dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
-                            DMA_FROM_DEVICE);
-       } else {
-               dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
-                            DMA_BIDIRECTIONAL);
-       }
+       talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
 
        if (edesc->dma_len)
                dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
@@ -773,19 +858,20 @@ static void ipsec_esp_encrypt_done(struct device *dev,
                                   int err)
 {
        struct aead_request *areq = context;
-       struct ipsec_esp_edesc *edesc =
-                container_of(desc, struct ipsec_esp_edesc, desc);
        struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+       struct talitos_edesc *edesc;
        struct scatterlist *sg;
        void *icvdata;
 
+       edesc = container_of(desc, struct talitos_edesc, desc);
+
        ipsec_esp_unmap(dev, edesc, areq);
 
        /* copy the generated ICV to dst */
        if (edesc->dma_len) {
                icvdata = &edesc->link_tbl[edesc->src_nents +
-                                          edesc->dst_nents + 1];
+                                          edesc->dst_nents + 2];
                sg = sg_last(areq->dst, edesc->dst_nents);
                memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
                       icvdata, ctx->authsize);
@@ -796,25 +882,26 @@ static void ipsec_esp_encrypt_done(struct device *dev,
        aead_request_complete(areq, err);
 }
 
-static void ipsec_esp_decrypt_done(struct device *dev,
-                                  struct talitos_desc *desc, void *context,
-                                  int err)
+static void ipsec_esp_decrypt_swauth_done(struct device *dev,
+                                         struct talitos_desc *desc,
+                                         void *context, int err)
 {
        struct aead_request *req = context;
-       struct ipsec_esp_edesc *edesc =
-                container_of(desc, struct ipsec_esp_edesc, desc);
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+       struct talitos_edesc *edesc;
        struct scatterlist *sg;
        void *icvdata;
 
+       edesc = container_of(desc, struct talitos_edesc, desc);
+
        ipsec_esp_unmap(dev, edesc, req);
 
        if (!err) {
                /* auth check */
                if (edesc->dma_len)
                        icvdata = &edesc->link_tbl[edesc->src_nents +
-                                                  edesc->dst_nents + 1];
+                                                  edesc->dst_nents + 2];
                else
                        icvdata = &edesc->link_tbl[0];
 
@@ -828,6 +915,27 @@ static void ipsec_esp_decrypt_done(struct device *dev,
        aead_request_complete(req, err);
 }
 
+static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
+                                         struct talitos_desc *desc,
+                                         void *context, int err)
+{
+       struct aead_request *req = context;
+       struct talitos_edesc *edesc;
+
+       edesc = container_of(desc, struct talitos_edesc, desc);
+
+       ipsec_esp_unmap(dev, edesc, req);
+
+       /* check ICV auth status */
+       if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
+                    DESC_HDR_LO_ICCR1_PASS))
+               err = -EBADMSG;
+
+       kfree(edesc);
+
+       aead_request_complete(req, err);
+}
+
 /*
  * convert scatterlist to SEC h/w link table format
  * stop at cryptlen bytes
@@ -838,17 +946,17 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
        int n_sg = sg_count;
 
        while (n_sg--) {
-               link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
+               to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
                link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
                link_tbl_ptr->j_extent = 0;
                link_tbl_ptr++;
                cryptlen -= sg_dma_len(sg);
-               sg = sg_next(sg);
+               sg = scatterwalk_sg_next(sg);
        }
 
        /* adjust (decrease) last one (or two) entry's len to cryptlen */
        link_tbl_ptr--;
-       while (link_tbl_ptr->len <= (-cryptlen)) {
+       while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
                /* Empty this entry, and move to previous one */
                cryptlen += be16_to_cpu(link_tbl_ptr->len);
                link_tbl_ptr->len = 0;
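
sg_to_link_tbl() copies the scatterlist into the SEC's link-table format and then walks backwards, emptying or shrinking trailing entries until the table covers exactly cryptlen; the hunk above also fixes that trim loop to compare the stored big-endian length via be16_to_cpu(). A user-space model of the build-then-trim idea on plain lengths (no DMA addresses, no endianness, illustrative only):

	#include <stdio.h>

	#define MAX_SEGS 8

	/* toy link-table entry: a length plus a "last entry" marker */
	struct tbl_entry {
		int len;
		int ret;	/* stands in for the DESC_PTR_LNKTBL_RETURN bit */
	};

	static int build_link_tbl(const int *seg_len, int nseg, int cryptlen,
				  struct tbl_entry *tbl)
	{
		int i, count = nseg;

		for (i = 0; i < nseg; i++) {
			tbl[i].len = seg_len[i];
			tbl[i].ret = 0;
			cryptlen -= seg_len[i];	/* <= 0 once segments cover cryptlen */
		}

		/* drop whole entries from the tail while they are not needed */
		i = nseg - 1;
		while (tbl[i].len <= -cryptlen) {
			cryptlen += tbl[i].len;
			tbl[i].len = 0;
			i--;
			count--;
		}
		tbl[i].len += cryptlen;		/* shrink the last used entry */
		tbl[i].ret = 1;			/* mark end of table */

		return count;
	}

	int main(void)
	{
		int segs[] = { 64, 64, 64 };	/* 192 bytes mapped */
		struct tbl_entry tbl[MAX_SEGS];
		int n = build_link_tbl(segs, 3, 100, tbl);	/* 100 bytes wanted */

		for (int i = 0; i < n; i++)	/* prints 64 then 36 */
			printf("entry %d: len=%d ret=%d\n", i, tbl[i].len, tbl[i].ret);
		return 0;
	}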
@@ -867,7 +975,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
 /*
  * fill in and submit ipsec_esp descriptor
  */
-static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
+static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
                     u8 *giv, u64 seq,
                     void (*callback) (struct device *dev,
                                       struct talitos_desc *desc,
@@ -879,18 +987,17 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
        struct talitos_desc *desc = &edesc->desc;
        unsigned int cryptlen = areq->cryptlen;
        unsigned int authsize = ctx->authsize;
-       unsigned int ivsize;
-       int sg_count;
+       unsigned int ivsize = crypto_aead_ivsize(aead);
+       int sg_count, ret;
+       int sg_link_tbl_len;
 
        /* hmac key */
        map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
                               0, DMA_TO_DEVICE);
        /* hmac data */
-       map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) -
-                              sg_virt(areq->assoc), sg_virt(areq->assoc), 0,
-                              DMA_TO_DEVICE);
+       map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
+                              sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
        /* cipher iv */
-       ivsize = crypto_aead_ivsize(aead);
        map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
                               DMA_TO_DEVICE);
 
@@ -908,26 +1015,31 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
        desc->ptr[4].len = cpu_to_be16(cryptlen);
        desc->ptr[4].j_extent = authsize;
 
-       if (areq->src == areq->dst)
-               sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
-                                     DMA_BIDIRECTIONAL);
-       else
-               sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
-                                     DMA_TO_DEVICE);
+       sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
+                                 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+                                                          : DMA_TO_DEVICE,
+                                 edesc->src_is_chained);
 
        if (sg_count == 1) {
-               desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
+               to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
        } else {
-               sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
+               sg_link_tbl_len = cryptlen;
+
+               if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+                       sg_link_tbl_len = cryptlen + authsize;
+
+               sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
                                          &edesc->link_tbl[0]);
                if (sg_count > 1) {
                        desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
-                       desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
-                       dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
-                                                  edesc->dma_len, DMA_BIDIRECTIONAL);
+                       to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
+                       dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+                                                  edesc->dma_len,
+                                                  DMA_BIDIRECTIONAL);
                } else {
                        /* Only one segment now, so no link tbl needed */
-                       desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
+                       to_talitos_ptr(&desc->ptr[4],
+                                      sg_dma_address(areq->src));
                }
        }
 
@@ -935,46 +1047,36 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
        desc->ptr[5].len = cpu_to_be16(cryptlen);
        desc->ptr[5].j_extent = authsize;
 
-       if (areq->src != areq->dst) {
-               sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
-                                     DMA_FROM_DEVICE);
-       }
+       if (areq->src != areq->dst)
+               sg_count = talitos_map_sg(dev, areq->dst,
+                                         edesc->dst_nents ? : 1,
+                                         DMA_FROM_DEVICE,
+                                         edesc->dst_is_chained);
 
        if (sg_count == 1) {
-               desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+               to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
        } else {
                struct talitos_ptr *link_tbl_ptr =
-                       &edesc->link_tbl[edesc->src_nents];
-               struct scatterlist *sg;
-
-               desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
-                                              edesc->dma_link_tbl +
-                                              edesc->src_nents);
-               if (areq->src == areq->dst) {
-                       memcpy(link_tbl_ptr, &edesc->link_tbl[0],
-                              edesc->src_nents * sizeof(struct talitos_ptr));
-               } else {
-                       sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
-                                                 link_tbl_ptr);
-               }
-               link_tbl_ptr += sg_count - 1;
+                       &edesc->link_tbl[edesc->src_nents + 1];
 
-               /* handle case where sg_last contains the ICV exclusively */
-               sg = sg_last(areq->dst, edesc->dst_nents);
-               if (sg->length == ctx->authsize)
-                       link_tbl_ptr--;
+               to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
+                              (edesc->src_nents + 1) *
+                              sizeof(struct talitos_ptr));
+               sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
+                                         link_tbl_ptr);
 
+               /* Add an entry to the link table for ICV data */
+               link_tbl_ptr += sg_count - 1;
                link_tbl_ptr->j_extent = 0;
+               sg_count++;
                link_tbl_ptr++;
                link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
                link_tbl_ptr->len = cpu_to_be16(authsize);
 
                /* icv data follows link tables */
-               link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
-                                               edesc->dma_link_tbl +
-                                               edesc->src_nents +
-                                               edesc->dst_nents + 1);
-
+               to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
+                              (edesc->src_nents + edesc->dst_nents + 2) *
+                              sizeof(struct talitos_ptr));
                desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
                dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
                                           edesc->dma_len, DMA_BIDIRECTIONAL);
@@ -984,93 +1086,117 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
        map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
                               DMA_FROM_DEVICE);
 
-       return talitos_submit(dev, desc, callback, areq);
+       ret = talitos_submit(dev, desc, callback, areq);
+       if (ret != -EINPROGRESS) {
+               ipsec_esp_unmap(dev, edesc, areq);
+               kfree(edesc);
+       }
+       return ret;
 }
 
-
 /*
  * derive number of elements in scatterlist
  */
-static int sg_count(struct scatterlist *sg_list, int nbytes)
+static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
 {
        struct scatterlist *sg = sg_list;
        int sg_nents = 0;
 
-       while (nbytes) {
+       *chained = 0;
+       while (nbytes > 0) {
                sg_nents++;
                nbytes -= sg->length;
-               sg = sg_next(sg);
+               if (!sg_is_last(sg) && (sg + 1)->length == 0)
+                       *chained = 1;
+               sg = scatterwalk_sg_next(sg);
        }
 
        return sg_nents;
 }
 
 /*
- * allocate and map the ipsec_esp extended descriptor
+ * allocate and map the extended descriptor
  */
-static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
-                                                    int icv_stashing)
+static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+                                                struct scatterlist *src,
+                                                struct scatterlist *dst,
+                                                unsigned int cryptlen,
+                                                unsigned int authsize,
+                                                int icv_stashing,
+                                                u32 cryptoflags)
 {
-       struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-       struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-       struct ipsec_esp_edesc *edesc;
+       struct talitos_edesc *edesc;
        int src_nents, dst_nents, alloc_len, dma_len;
-       gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+       int src_chained, dst_chained = 0;
+       gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                      GFP_ATOMIC;
 
-       if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
-               dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
+       if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
+               dev_err(dev, "length exceeds h/w max limit\n");
                return ERR_PTR(-EINVAL);
        }
 
-       src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize);
+       src_nents = sg_count(src, cryptlen + authsize, &src_chained);
        src_nents = (src_nents == 1) ? 0 : src_nents;
 
-       if (areq->dst == areq->src) {
+       if (dst == src) {
                dst_nents = src_nents;
        } else {
-               dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
+               dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
                dst_nents = (dst_nents == 1) ? 0 : dst_nents;
        }
 
        /*
         * allocate space for base edesc plus the link tables,
-        * allowing for a separate entry for the generated ICV (+ 1),
+        * allowing for two separate entries for ICV and generated ICV (+ 2),
         * and the ICV data itself
         */
-       alloc_len = sizeof(struct ipsec_esp_edesc);
+       alloc_len = sizeof(struct talitos_edesc);
        if (src_nents || dst_nents) {
-               dma_len = (src_nents + dst_nents + 1) *
-                                sizeof(struct talitos_ptr) + ctx->authsize;
+               dma_len = (src_nents + dst_nents + 2) *
+                                sizeof(struct talitos_ptr) + authsize;
                alloc_len += dma_len;
        } else {
                dma_len = 0;
-               alloc_len += icv_stashing ? ctx->authsize : 0;
+               alloc_len += icv_stashing ? authsize : 0;
        }
 
        edesc = kmalloc(alloc_len, GFP_DMA | flags);
        if (!edesc) {
-               dev_err(ctx->dev, "could not allocate edescriptor\n");
+               dev_err(dev, "could not allocate edescriptor\n");
                return ERR_PTR(-ENOMEM);
        }
 
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
+       edesc->src_is_chained = src_chained;
+       edesc->dst_is_chained = dst_chained;
        edesc->dma_len = dma_len;
-       edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0],
+       edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
                                             edesc->dma_len, DMA_BIDIRECTIONAL);
 
        return edesc;
 }
 
-static int aead_authenc_encrypt(struct aead_request *req)
+static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
+                                             int icv_stashing)
+{
+       struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+
+       return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+                                  areq->cryptlen, ctx->authsize, icv_stashing,
+                                  areq->base.flags);
+}
+
+static int aead_encrypt(struct aead_request *req)
 {
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-       struct ipsec_esp_edesc *edesc;
+       struct talitos_edesc *edesc;
 
        /* allocate extended descriptor */
-       edesc = ipsec_esp_edesc_alloc(req, 0);
+       edesc = aead_edesc_alloc(req, 0);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);
 
@@ -1080,26 +1206,47 @@ static int aead_authenc_encrypt(struct aead_request *req)
        return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
 }
 
-static int aead_authenc_decrypt(struct aead_request *req)
+static int aead_decrypt(struct aead_request *req)
 {
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        unsigned int authsize = ctx->authsize;
-       struct ipsec_esp_edesc *edesc;
+       struct talitos_private *priv = dev_get_drvdata(ctx->dev);
+       struct talitos_edesc *edesc;
        struct scatterlist *sg;
        void *icvdata;
 
        req->cryptlen -= authsize;
 
        /* allocate extended descriptor */
-       edesc = ipsec_esp_edesc_alloc(req, 1);
+       edesc = aead_edesc_alloc(req, 1);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);
 
+       if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+           ((!edesc->src_nents && !edesc->dst_nents) ||
+            priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
+
+               /* decrypt and check the ICV */
+               edesc->desc.hdr = ctx->desc_hdr_template |
+                                 DESC_HDR_DIR_INBOUND |
+                                 DESC_HDR_MODE1_MDEU_CICV;
+
+               /* reset integrity check result bits */
+               edesc->desc.hdr_lo = 0;
+
+               return ipsec_esp(edesc, req, NULL, 0,
+                                ipsec_esp_decrypt_hwauth_done);
+
+       }
+
+       /* Have to check the ICV with software */
+       edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+
        /* stash incoming ICV for later cmp with ICV generated by the h/w */
        if (edesc->dma_len)
                icvdata = &edesc->link_tbl[edesc->src_nents +
-                                          edesc->dst_nents + 1];
+                                          edesc->dst_nents + 2];
        else
                icvdata = &edesc->link_tbl[0];
 
@@ -1108,22 +1255,18 @@ static int aead_authenc_decrypt(struct aead_request *req)
        memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
               ctx->authsize);
 
-       /* decrypt */
-       edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
-
-       return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_done);
+       return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
 }
 
-static int aead_authenc_givencrypt(
-       struct aead_givcrypt_request *req)
+static int aead_givencrypt(struct aead_givcrypt_request *req)
 {
        struct aead_request *areq = &req->areq;
        struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-       struct ipsec_esp_edesc *edesc;
+       struct talitos_edesc *edesc;
 
        /* allocate extended descriptor */
-       edesc = ipsec_esp_edesc_alloc(areq, 0);
+       edesc = aead_edesc_alloc(areq, 0);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);
 
@@ -1131,36 +1274,235 @@ static int aead_authenc_givencrypt(
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
 
        memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+       /* avoid consecutive packets going out with same IV */
+       *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
 
        return ipsec_esp(edesc, areq, req->giv, req->seq,
                         ipsec_esp_encrypt_done);
 }
 
+static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+                            const u8 *key, unsigned int keylen)
+{
+       struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+       struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
+
+       if (keylen > TALITOS_MAX_KEY_SIZE)
+               goto badkey;
+
+       if (keylen < alg->min_keysize || keylen > alg->max_keysize)
+               goto badkey;
+
+       memcpy(&ctx->key, key, keylen);
+       ctx->keylen = keylen;
+
+       return 0;
+
+badkey:
+       crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+       return -EINVAL;
+}
+
+static void common_nonsnoop_unmap(struct device *dev,
+                                 struct talitos_edesc *edesc,
+                                 struct ablkcipher_request *areq)
+{
+       unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+       unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
+       unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
+
+       talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
+
+       if (edesc->dma_len)
+               dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+                                DMA_BIDIRECTIONAL);
+}
+
+static void ablkcipher_done(struct device *dev,
+                           struct talitos_desc *desc, void *context,
+                           int err)
+{
+       struct ablkcipher_request *areq = context;
+       struct talitos_edesc *edesc;
+
+       edesc = container_of(desc, struct talitos_edesc, desc);
+
+       common_nonsnoop_unmap(dev, edesc, areq);
+
+       kfree(edesc);
+
+       areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop(struct talitos_edesc *edesc,
+                          struct ablkcipher_request *areq,
+                          u8 *giv,
+                          void (*callback) (struct device *dev,
+                                            struct talitos_desc *desc,
+                                            void *context, int error))
+{
+       struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+       struct device *dev = ctx->dev;
+       struct talitos_desc *desc = &edesc->desc;
+       unsigned int cryptlen = areq->nbytes;
+       unsigned int ivsize;
+       int sg_count, ret;
+
+       /* first DWORD empty */
+       desc->ptr[0].len = 0;
+       to_talitos_ptr(&desc->ptr[0], 0);
+       desc->ptr[0].j_extent = 0;
+
+       /* cipher iv */
+       ivsize = crypto_ablkcipher_ivsize(cipher);
+       map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
+                              DMA_TO_DEVICE);
+
+       /* cipher key */
+       map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+                              (char *)&ctx->key, 0, DMA_TO_DEVICE);
+
+       /*
+        * cipher in
+        */
+       desc->ptr[3].len = cpu_to_be16(cryptlen);
+       desc->ptr[3].j_extent = 0;
+
+       sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
+                                 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+                                                          : DMA_TO_DEVICE,
+                                 edesc->src_is_chained);
+
+       if (sg_count == 1) {
+               to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
+       } else {
+               sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
+                                         &edesc->link_tbl[0]);
+               if (sg_count > 1) {
+                       to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
+                       desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
+                       dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+                                                  edesc->dma_len,
+                                                  DMA_BIDIRECTIONAL);
+               } else {
+                       /* Only one segment now, so no link tbl needed */
+                       to_talitos_ptr(&desc->ptr[3],
+                                      sg_dma_address(areq->src));
+               }
+       }
+
+       /* cipher out */
+       desc->ptr[4].len = cpu_to_be16(cryptlen);
+       desc->ptr[4].j_extent = 0;
+
+       if (areq->src != areq->dst)
+               sg_count = talitos_map_sg(dev, areq->dst,
+                                         edesc->dst_nents ? : 1,
+                                         DMA_FROM_DEVICE,
+                                         edesc->dst_is_chained);
+
+       if (sg_count == 1) {
+               to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
+       } else {
+               struct talitos_ptr *link_tbl_ptr =
+                       &edesc->link_tbl[edesc->src_nents + 1];
+
+               to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+                                             (edesc->src_nents + 1) *
+                                             sizeof(struct talitos_ptr));
+               desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+               sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
+                                         link_tbl_ptr);
+               dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
+                                          edesc->dma_len, DMA_BIDIRECTIONAL);
+       }
+
+       /* iv out */
+       map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
+                              DMA_FROM_DEVICE);
+
+       /* last DWORD empty */
+       desc->ptr[6].len = 0;
+       to_talitos_ptr(&desc->ptr[6], 0);
+       desc->ptr[6].j_extent = 0;
+
+       ret = talitos_submit(dev, desc, callback, areq);
+       if (ret != -EINPROGRESS) {
+               common_nonsnoop_unmap(dev, edesc, areq);
+               kfree(edesc);
+       }
+       return ret;
+}
+
+static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
+                                                   areq)
+{
+       struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+       return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
+                                  0, 0, areq->base.flags);
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+       struct talitos_edesc *edesc;
+
+       /* allocate extended descriptor */
+       edesc = ablkcipher_edesc_alloc(areq);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+
+       /* set encrypt */
+       edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
+
+       return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *areq)
+{
+       struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+       struct talitos_edesc *edesc;
+
+       /* allocate extended descriptor */
+       edesc = ablkcipher_edesc_alloc(areq);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+
+       edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+
+       return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+}
+
 struct talitos_alg_template {
-       char name[CRYPTO_MAX_ALG_NAME];
-       char driver_name[CRYPTO_MAX_ALG_NAME];
-       unsigned int blocksize;
-       struct aead_alg aead;
-       struct device *dev;
+       struct crypto_alg alg;
        __be32 desc_hdr_template;
 };
 
 static struct talitos_alg_template driver_algs[] = {
-       /* single-pass ipsec_esp descriptor */
+       /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
        {
-               .name = "authenc(hmac(sha1),cbc(aes))",
-               .driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
-               .blocksize = AES_BLOCK_SIZE,
-               .aead = {
-                       .setkey = aead_authenc_setkey,
-                       .setauthsize = aead_authenc_setauthsize,
-                       .encrypt = aead_authenc_encrypt,
-                       .decrypt = aead_authenc_decrypt,
-                       .givencrypt = aead_authenc_givencrypt,
-                       .geniv = "<built-in>",
-                       .ivsize = AES_BLOCK_SIZE,
-                       .maxauthsize = SHA1_DIGEST_SIZE,
-                       },
+               .alg = {
+                       .cra_name = "authenc(hmac(sha1),cbc(aes))",
+                       .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
+                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+                       .cra_type = &crypto_aead_type,
+                       .cra_aead = {
+                               .setkey = aead_setkey,
+                               .setauthsize = aead_setauthsize,
+                               .encrypt = aead_encrypt,
+                               .decrypt = aead_decrypt,
+                               .givencrypt = aead_givencrypt,
+                               .geniv = "<built-in>",
+                               .ivsize = AES_BLOCK_SIZE,
+                               .maxauthsize = SHA1_DIGEST_SIZE,
+                       }
+               },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_AESU |
                                     DESC_HDR_MODE0_AESU_CBC |
@@ -1170,19 +1512,23 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
        },
        {
-               .name = "authenc(hmac(sha1),cbc(des3_ede))",
-               .driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
-               .blocksize = DES3_EDE_BLOCK_SIZE,
-               .aead = {
-                       .setkey = aead_authenc_setkey,
-                       .setauthsize = aead_authenc_setauthsize,
-                       .encrypt = aead_authenc_encrypt,
-                       .decrypt = aead_authenc_decrypt,
-                       .givencrypt = aead_authenc_givencrypt,
-                       .geniv = "<built-in>",
-                       .ivsize = DES3_EDE_BLOCK_SIZE,
-                       .maxauthsize = SHA1_DIGEST_SIZE,
-                       },
+               .alg = {
+                       .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+                       .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
+                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+                       .cra_type = &crypto_aead_type,
+                       .cra_aead = {
+                               .setkey = aead_setkey,
+                               .setauthsize = aead_setauthsize,
+                               .encrypt = aead_encrypt,
+                               .decrypt = aead_decrypt,
+                               .givencrypt = aead_givencrypt,
+                               .geniv = "<built-in>",
+                               .ivsize = DES3_EDE_BLOCK_SIZE,
+                               .maxauthsize = SHA1_DIGEST_SIZE,
+                       }
+               },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_DEU |
                                     DESC_HDR_MODE0_DEU_CBC |
@@ -1193,19 +1539,23 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
        },
        {
-               .name = "authenc(hmac(sha256),cbc(aes))",
-               .driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
-               .blocksize = AES_BLOCK_SIZE,
-               .aead = {
-                       .setkey = aead_authenc_setkey,
-                       .setauthsize = aead_authenc_setauthsize,
-                       .encrypt = aead_authenc_encrypt,
-                       .decrypt = aead_authenc_decrypt,
-                       .givencrypt = aead_authenc_givencrypt,
-                       .geniv = "<built-in>",
-                       .ivsize = AES_BLOCK_SIZE,
-                       .maxauthsize = SHA256_DIGEST_SIZE,
-                       },
+               .alg = {
+                       .cra_name = "authenc(hmac(sha256),cbc(aes))",
+                       .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
+                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+                       .cra_type = &crypto_aead_type,
+                       .cra_aead = {
+                               .setkey = aead_setkey,
+                               .setauthsize = aead_setauthsize,
+                               .encrypt = aead_encrypt,
+                               .decrypt = aead_decrypt,
+                               .givencrypt = aead_givencrypt,
+                               .geniv = "<built-in>",
+                               .ivsize = AES_BLOCK_SIZE,
+                               .maxauthsize = SHA256_DIGEST_SIZE,
+                       }
+               },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_AESU |
                                     DESC_HDR_MODE0_AESU_CBC |
@@ -1215,19 +1565,23 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
        },
        {
-               .name = "authenc(hmac(sha256),cbc(des3_ede))",
-               .driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
-               .blocksize = DES3_EDE_BLOCK_SIZE,
-               .aead = {
-                       .setkey = aead_authenc_setkey,
-                       .setauthsize = aead_authenc_setauthsize,
-                       .encrypt = aead_authenc_encrypt,
-                       .decrypt = aead_authenc_decrypt,
-                       .givencrypt = aead_authenc_givencrypt,
-                       .geniv = "<built-in>",
-                       .ivsize = DES3_EDE_BLOCK_SIZE,
-                       .maxauthsize = SHA256_DIGEST_SIZE,
-                       },
+               .alg = {
+                       .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+                       .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
+                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+                       .cra_type = &crypto_aead_type,
+                       .cra_aead = {
+                               .setkey = aead_setkey,
+                               .setauthsize = aead_setauthsize,
+                               .encrypt = aead_encrypt,
+                               .decrypt = aead_decrypt,
+                               .givencrypt = aead_givencrypt,
+                               .geniv = "<built-in>",
+                               .ivsize = DES3_EDE_BLOCK_SIZE,
+                               .maxauthsize = SHA256_DIGEST_SIZE,
+                       }
+               },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_DEU |
                                     DESC_HDR_MODE0_DEU_CBC |
@@ -1238,19 +1592,23 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
        },
        {
-               .name = "authenc(hmac(md5),cbc(aes))",
-               .driver_name = "authenc-hmac-md5-cbc-aes-talitos",
-               .blocksize = AES_BLOCK_SIZE,
-               .aead = {
-                       .setkey = aead_authenc_setkey,
-                       .setauthsize = aead_authenc_setauthsize,
-                       .encrypt = aead_authenc_encrypt,
-                       .decrypt = aead_authenc_decrypt,
-                       .givencrypt = aead_authenc_givencrypt,
-                       .geniv = "<built-in>",
-                       .ivsize = AES_BLOCK_SIZE,
-                       .maxauthsize = MD5_DIGEST_SIZE,
-                       },
+               .alg = {
+                       .cra_name = "authenc(hmac(md5),cbc(aes))",
+                       .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
+                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+                       .cra_type = &crypto_aead_type,
+                       .cra_aead = {
+                               .setkey = aead_setkey,
+                               .setauthsize = aead_setauthsize,
+                               .encrypt = aead_encrypt,
+                               .decrypt = aead_decrypt,
+                               .givencrypt = aead_givencrypt,
+                               .geniv = "<built-in>",
+                               .ivsize = AES_BLOCK_SIZE,
+                               .maxauthsize = MD5_DIGEST_SIZE,
+                       }
+               },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_AESU |
                                     DESC_HDR_MODE0_AESU_CBC |
@@ -1260,19 +1618,23 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
        },
        {
-               .name = "authenc(hmac(md5),cbc(des3_ede))",
-               .driver_name = "authenc-hmac-md5-cbc-3des-talitos",
-               .blocksize = DES3_EDE_BLOCK_SIZE,
-               .aead = {
-                       .setkey = aead_authenc_setkey,
-                       .setauthsize = aead_authenc_setauthsize,
-                       .encrypt = aead_authenc_encrypt,
-                       .decrypt = aead_authenc_decrypt,
-                       .givencrypt = aead_authenc_givencrypt,
-                       .geniv = "<built-in>",
-                       .ivsize = DES3_EDE_BLOCK_SIZE,
-                       .maxauthsize = MD5_DIGEST_SIZE,
-                       },
+               .alg = {
+                       .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+                       .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
+                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+                       .cra_type = &crypto_aead_type,
+                       .cra_aead = {
+                               .setkey = aead_setkey,
+                               .setauthsize = aead_setauthsize,
+                               .encrypt = aead_encrypt,
+                               .decrypt = aead_decrypt,
+                               .givencrypt = aead_givencrypt,
+                               .geniv = "<built-in>",
+                               .ivsize = DES3_EDE_BLOCK_SIZE,
+                               .maxauthsize = MD5_DIGEST_SIZE,
+                       }
+               },
                .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
                                     DESC_HDR_SEL0_DEU |
                                     DESC_HDR_MODE0_DEU_CBC |
@@ -1281,6 +1643,52 @@ static struct talitos_alg_template driver_algs[] = {
                                     DESC_HDR_MODE1_MDEU_INIT |
                                     DESC_HDR_MODE1_MDEU_PAD |
                                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
+       },
+       /* ABLKCIPHER algorithms. */
+       {
+               .alg = {
+                       .cra_name = "cbc(aes)",
+                       .cra_driver_name = "cbc-aes-talitos",
+                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+                                     CRYPTO_ALG_ASYNC,
+                       .cra_type = &crypto_ablkcipher_type,
+                       .cra_ablkcipher = {
+                               .setkey = ablkcipher_setkey,
+                               .encrypt = ablkcipher_encrypt,
+                               .decrypt = ablkcipher_decrypt,
+                               .geniv = "eseqiv",
+                               .min_keysize = AES_MIN_KEY_SIZE,
+                               .max_keysize = AES_MAX_KEY_SIZE,
+                               .ivsize = AES_BLOCK_SIZE,
+                       }
+               },
+               .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+                                    DESC_HDR_SEL0_AESU |
+                                    DESC_HDR_MODE0_AESU_CBC,
+       },
+       {
+               .alg = {
+                       .cra_name = "cbc(des3_ede)",
+                       .cra_driver_name = "cbc-3des-talitos",
+                       .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+                       .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+                                     CRYPTO_ALG_ASYNC,
+                       .cra_type = &crypto_ablkcipher_type,
+                       .cra_ablkcipher = {
+                               .setkey = ablkcipher_setkey,
+                               .encrypt = ablkcipher_encrypt,
+                               .decrypt = ablkcipher_decrypt,
+                               .geniv = "eseqiv",
+                               .min_keysize = DES3_EDE_KEY_SIZE,
+                               .max_keysize = DES3_EDE_KEY_SIZE,
+                               .ivsize = DES3_EDE_BLOCK_SIZE,
+                       }
+               },
+               .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+                                    DESC_HDR_SEL0_DEU |
+                                    DESC_HDR_MODE0_DEU_CBC |
+                                    DESC_HDR_MODE0_DEU_3DES,
        }
 };
 
@@ -1294,12 +1702,14 @@ struct talitos_crypto_alg {
 static int talitos_cra_init(struct crypto_tfm *tfm)
 {
        struct crypto_alg *alg = tfm->__crt_alg;
-       struct talitos_crypto_alg *talitos_alg =
-                container_of(alg, struct talitos_crypto_alg, crypto_alg);
+       struct talitos_crypto_alg *talitos_alg;
        struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
 
+       talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg);
+
        /* update context with ptr to dev */
        ctx->dev = talitos_alg->dev;
+
        /* copy descriptor header template value */
        ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
 
@@ -1329,7 +1739,7 @@ static int hw_supports(struct device *dev, __be32 desc_hdr_template)
        return ret;
 }
 
-static int __devexit talitos_remove(struct of_device *ofdev)
+static int talitos_remove(struct of_device *ofdev)
 {
        struct device *dev = &ofdev->dev;
        struct talitos_private *priv = dev_get_drvdata(dev);
@@ -1345,17 +1755,11 @@ static int __devexit talitos_remove(struct of_device *ofdev)
        if (hw_supports(dev, DESC_HDR_SEL0_RNG))
                talitos_unregister_rng(dev);
 
-       kfree(priv->submit_count);
-       kfree(priv->tail);
-       kfree(priv->head);
-
-       if (priv->fifo)
-               for (i = 0; i < priv->num_channels; i++)
-                       kfree(priv->fifo[i]);
+       for (i = 0; i < priv->num_channels; i++)
+               if (priv->chan[i].fifo)
+                       kfree(priv->chan[i].fifo);
 
-       kfree(priv->fifo);
-       kfree(priv->head_lock);
-       kfree(priv->tail_lock);
+       kfree(priv->chan);
 
        if (priv->irq != NO_IRQ) {
                free_irq(priv->irq, dev);
@@ -1363,7 +1767,6 @@ static int __devexit talitos_remove(struct of_device *ofdev)
        }
 
        tasklet_kill(&priv->done_task);
-       tasklet_kill(&priv->error_task);
 
        iounmap(priv->reg);
 
@@ -1386,19 +1789,13 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
                return ERR_PTR(-ENOMEM);
 
        alg = &t_alg->crypto_alg;
+       *alg = template->alg;
 
-       snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
-       snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-                template->driver_name);
        alg->cra_module = THIS_MODULE;
        alg->cra_init = talitos_cra_init;
        alg->cra_priority = TALITOS_CRA_PRIORITY;
-       alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
-       alg->cra_blocksize = template->blocksize;
        alg->cra_alignmask = 0;
-       alg->cra_type = &crypto_aead_type;
        alg->cra_ctxsize = sizeof(struct talitos_ctx);
-       alg->cra_u.aead = template->aead;
 
        t_alg->desc_hdr_template = template->desc_hdr_template;
        t_alg->dev = dev;
@@ -1424,7 +1821,8 @@ static int talitos_probe(struct of_device *ofdev,
        priv->ofdev = ofdev;
 
        tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
-       tasklet_init(&priv->error_task, talitos_error, (unsigned long)dev);
+
+       INIT_LIST_HEAD(&priv->alg_list);
 
        priv->irq = irq_of_parse_and_map(np, 0);
 
@@ -1475,58 +1873,42 @@ static int talitos_probe(struct of_device *ofdev,
                goto err_out;
        }
 
-       priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
-                                 GFP_KERNEL);
-       priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
-                                 GFP_KERNEL);
-       if (!priv->head_lock || !priv->tail_lock) {
-               dev_err(dev, "failed to allocate fifo locks\n");
-               err = -ENOMEM;
-               goto err_out;
-       }
+       if (of_device_is_compatible(np, "fsl,sec3.0"))
+               priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
 
-       for (i = 0; i < priv->num_channels; i++) {
-               spin_lock_init(&priv->head_lock[i]);
-               spin_lock_init(&priv->tail_lock[i]);
-       }
+       if (of_device_is_compatible(np, "fsl,sec2.1"))
+               priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
 
-       priv->fifo = kmalloc(sizeof(struct talitos_request *) *
+       priv->chan = kzalloc(sizeof(struct talitos_channel) *
                             priv->num_channels, GFP_KERNEL);
-       if (!priv->fifo) {
-               dev_err(dev, "failed to allocate request fifo\n");
+       if (!priv->chan) {
+               dev_err(dev, "failed to allocate channel management space\n");
                err = -ENOMEM;
                goto err_out;
        }
 
+       for (i = 0; i < priv->num_channels; i++) {
+               spin_lock_init(&priv->chan[i].head_lock);
+               spin_lock_init(&priv->chan[i].tail_lock);
+       }
+
        priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
 
        for (i = 0; i < priv->num_channels; i++) {
-               priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
-                                       priv->fifo_len, GFP_KERNEL);
-               if (!priv->fifo[i]) {
+               priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
+                                            priv->fifo_len, GFP_KERNEL);
+               if (!priv->chan[i].fifo) {
                        dev_err(dev, "failed to allocate request fifo %d\n", i);
                        err = -ENOMEM;
                        goto err_out;
                }
        }
 
-       priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
-                                    GFP_KERNEL);
-       if (!priv->submit_count) {
-               dev_err(dev, "failed to allocate fifo submit count space\n");
-               err = -ENOMEM;
-               goto err_out;
-       }
        for (i = 0; i < priv->num_channels; i++)
-               atomic_set(&priv->submit_count[i], -priv->chfifo_len);
+               atomic_set(&priv->chan[i].submit_count,
+                          -(priv->chfifo_len - 1));
 
-       priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
-       priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
-       if (!priv->head || !priv->tail) {
-               dev_err(dev, "failed to allocate request index space\n");
-               err = -ENOMEM;
-               goto err_out;
-       }
+       dma_set_mask(dev, DMA_BIT_MASK(36));
 
        /* reset and initialize the h/w */
        err = init_device(dev);
@@ -1546,8 +1928,6 @@ static int talitos_probe(struct of_device *ofdev,
        }
 
        /* register crypto algorithms the device supports */
-       INIT_LIST_HEAD(&priv->alg_list);
-
        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
                        struct talitos_crypto_alg *t_alg;
@@ -1579,7 +1959,7 @@ err_out:
        return err;
 }
 
-static struct of_device_id talitos_match[] = {
+static const struct of_device_id talitos_match[] = {
        {
                .compatible = "fsl,sec2.0",
        },
@@ -1591,7 +1971,7 @@ static struct of_platform_driver talitos_driver = {
        .name = "talitos",
        .match_table = talitos_match,
        .probe = talitos_probe,
-       .remove = __devexit_p(talitos_remove),
+       .remove = talitos_remove,
 };
 
 static int __init talitos_init(void)