gss_krb5: introduce encryption type framework
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index bfb6a29..ccd5236 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -1,7 +1,7 @@
 /*
  *  linux/net/sunrpc/gss_krb5_crypto.c
  *
- *  Copyright (c) 2000 The Regents of the University of Michigan.
+ *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
  *  All rights reserved.
  *
  *  Andy Adamson   <andros@umich.edu>
@@ -37,7 +37,6 @@
 #include <linux/err.h>
 #include <linux/types.h>
 #include <linux/mm.h>
-#include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <linux/crypto.h>
 #include <linux/highmem.h>
@@ -59,15 +58,15 @@ krb5_encrypt(
 {
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
-       u8 local_iv[16] = {0};
+       u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
        struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
        if (length % crypto_blkcipher_blocksize(tfm) != 0)
                goto out;
 
-       if (crypto_blkcipher_ivsize(tfm) > 16) {
-               dprintk("RPC:       gss_k5encrypt: tfm iv size to large %d\n",
-                        crypto_blkcipher_ivsize(tfm));
+       if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+               dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
+                       crypto_blkcipher_ivsize(tfm));
                goto out;
        }
 
@@ -75,7 +74,7 @@ krb5_encrypt(
                memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
 
        memcpy(out, in, length);
-       sg_set_buf(sg, out, length);
+       sg_init_one(sg, out, length);
 
        ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
 out:
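
A note on the sg_set_buf() to sg_init_one() conversions throughout this patch: since the scatterlist chaining rework, the crypto layer expects each scatterlist entry to be fully initialized, including its end marker. sg_set_buf() only points an existing entry at a buffer, while sg_init_one() also zeroes the entry and marks it as the last one. A minimal sketch of the equivalence (illustrative only, not part of the patch):

#include <linux/scatterlist.h>

/* Hypothetical helper for illustration: sg_init_one() behaves like
 * sg_init_table() on a one-entry table followed by sg_set_buf(). */
static void single_entry_sketch(struct scatterlist *sg, void *buf,
				unsigned int len)
{
	sg_init_one(sg, buf, len);	/* zero the entry, set end marker, set buf */

	/* long-hand equivalent: */
	sg_init_table(sg, 1);		/* clear the table, mark entry 0 as last */
	sg_set_buf(sg, buf, len);	/* point entry 0 at buf */
}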
@@ -83,8 +82,6 @@ out:
        return ret;
 }
 
-EXPORT_SYMBOL(krb5_encrypt);
-
 u32
 krb5_decrypt(
      struct crypto_blkcipher *tfm,
@@ -95,14 +92,14 @@ krb5_decrypt(
 {
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
-       u8 local_iv[16] = {0};
+       u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
        struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
        if (length % crypto_blkcipher_blocksize(tfm) != 0)
                goto out;
 
-       if (crypto_blkcipher_ivsize(tfm) > 16) {
-               dprintk("RPC:       gss_k5decrypt: tfm iv size to large %d\n",
+       if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+               dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
                        crypto_blkcipher_ivsize(tfm));
                goto out;
        }
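
The fixed 16-byte IV arrays become GSS_KRB5_MAX_BLOCKSIZE so that one stack buffer can serve every enctype the framework will support. A sketch of the idea, with an assumed value for the constant (the real definition lives in include/linux/sunrpc/gss_krb5.h):

#include <linux/crypto.h>
#include <linux/types.h>

#define GSS_KRB5_MAX_BLOCKSIZE	16	/* assumed here: AES blocks; DES/DES3 need only 8 */

/* Hypothetical check mirroring the guards above: any tfm whose IV
 * would overrun the shared buffer is rejected before use. */
static int iv_fits_sketch(struct crypto_blkcipher *tfm)
{
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};

	return crypto_blkcipher_ivsize(tfm) <= sizeof(local_iv);
}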
@@ -110,7 +107,7 @@ krb5_decrypt(
                memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
 
        memcpy(out, in, length);
-       sg_set_buf(sg, out, length);
+       sg_init_one(sg, out, length);
 
        ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
 out:
@@ -118,8 +115,6 @@ out:
        return ret;
 }
 
-EXPORT_SYMBOL(krb5_decrypt);
-
 static int
 checksummer(struct scatterlist *sg, void *data)
 {
@@ -146,7 +141,7 @@ make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
        err = crypto_hash_init(&desc);
        if (err)
                goto out;
-       sg_set_buf(sg, header, hdrlen);
+       sg_init_one(sg, header, hdrlen);
        err = crypto_hash_update(&desc, sg, hdrlen);
        if (err)
                goto out;
@@ -161,10 +156,8 @@ out:
        return err ? GSS_S_FAILURE : 0;
 }
 
-EXPORT_SYMBOL(make_checksum);
-
 struct encryptor_desc {
-       u8 iv[8]; /* XXX hard-coded blocksize */
+       u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
        struct blkcipher_desc desc;
        int pos;
        struct xdr_buf *outbuf;
@@ -188,8 +181,6 @@ encryptor(struct scatterlist *sg, void *data)
        /* Worst case is 4 fragments: head, end of page 1, start
         * of page 2, tail.  Anything more is a bug. */
        BUG_ON(desc->fragno > 3);
-       desc->infrags[desc->fragno] = *sg;
-       desc->outfrags[desc->fragno] = *sg;
 
        page_pos = desc->pos - outbuf->head[0].iov_len;
        if (page_pos >= 0 && page_pos < outbuf->page_len) {
@@ -197,29 +188,38 @@ encryptor(struct scatterlist *sg, void *data)
                int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
                in_page = desc->pages[i];
        } else {
-               in_page = sg->page;
+               in_page = sg_page(sg);
        }
-       desc->infrags[desc->fragno].page = in_page;
+       sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
+                   sg->offset);
+       sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
+                   sg->offset);
        desc->fragno++;
        desc->fraglen += sg->length;
        desc->pos += sg->length;
 
-       fraglen = thislen & 7; /* XXX hardcoded blocksize */
+       fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
        thislen -= fraglen;
 
        if (thislen == 0)
                return 0;
 
+       sg_mark_end(&desc->infrags[desc->fragno - 1]);
+       sg_mark_end(&desc->outfrags[desc->fragno - 1]);
+
        ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
                                          desc->infrags, thislen);
        if (ret)
                return ret;
+
+       sg_init_table(desc->infrags, 4);
+       sg_init_table(desc->outfrags, 4);
+
        if (fraglen) {
-               desc->outfrags[0].page = sg->page;
-               desc->outfrags[0].offset = sg->offset + sg->length - fraglen;
-               desc->outfrags[0].length = fraglen;
+               sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
+                               sg->offset + sg->length - fraglen);
                desc->infrags[0] = desc->outfrags[0];
-               desc->infrags[0].page = in_page;
+               sg_assign_page(&desc->infrags[0], in_page);
                desc->fragno = 1;
                desc->fraglen = fraglen;
        } else {
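
The hard-coded "& 7" masks become a mask derived from the tfm's block size. This only works because every block size in play is a power of two, so (blocksize - 1) is an all-ones bit mask. A sketch of the split the encryptor performs (assumed helper name, illustrative only):

#include <linux/crypto.h>

/* Split a run of thislen bytes at a cipher-block boundary: the whole
 * blocks are encrypted now, the leftover sub-block bytes are carried
 * into the next encryptor() call via desc->fraglen. */
static void split_sketch(struct crypto_blkcipher *tfm, unsigned int thislen,
			 unsigned int *whole, unsigned int *leftover)
{
	unsigned int bs = crypto_blkcipher_blocksize(tfm);

	*leftover = thislen & (bs - 1);	/* == thislen % bs when bs is a power of two */
	*whole = thislen - *leftover;
}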
@@ -248,14 +248,15 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
        desc.fragno = 0;
        desc.fraglen = 0;
 
+       sg_init_table(desc.infrags, 4);
+       sg_init_table(desc.outfrags, 4);
+
        ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
        return ret;
 }
 
-EXPORT_SYMBOL(gss_encrypt_xdr_buf);
-
 struct decryptor_desc {
-       u8 iv[8]; /* XXX hard-coded blocksize */
+       u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
        struct blkcipher_desc desc;
        struct scatterlist frags[4];
        int fragno;
@@ -272,24 +273,29 @@ decryptor(struct scatterlist *sg, void *data)
        /* Worst case is 4 fragments: head, end of page 1, start
         * of page 2, tail.  Anything more is a bug. */
        BUG_ON(desc->fragno > 3);
-       desc->frags[desc->fragno] = *sg;
+       sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
+                   sg->offset);
        desc->fragno++;
        desc->fraglen += sg->length;
 
-       fraglen = thislen & 7; /* XXX hardcoded blocksize */
+       fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
        thislen -= fraglen;
 
        if (thislen == 0)
                return 0;
 
+       sg_mark_end(&desc->frags[desc->fragno - 1]);
+
        ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
                                          desc->frags, thislen);
        if (ret)
                return ret;
+
+       sg_init_table(desc->frags, 4);
+
        if (fraglen) {
-               desc->frags[0].page = sg->page;
-               desc->frags[0].offset = sg->offset + sg->length - fraglen;
-               desc->frags[0].length = fraglen;
+               sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
+                               sg->offset + sg->length - fraglen);
                desc->fragno = 1;
                desc->fraglen = fraglen;
        } else {
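
Both encryptor() and decryptor() now share the same scatterlist discipline: mark the last used entry before handing the fragments to the crypto layer, then re-initialize the table before refilling it. A condensed sketch of that pattern (hypothetical helper, not in the patch):

#include <linux/scatterlist.h>

/* The crypto layer walks the list until it hits the end marker, so
 * the marker must sit on the last entry actually in use, and a fresh
 * sg_init_table() is needed before the entries are reused. */
static void frags_round_trip_sketch(struct scatterlist *frags,
				    struct page *page, unsigned int len,
				    unsigned int offset)
{
	sg_init_table(frags, 4);	/* clear all entries, end marker at [3] */
	sg_set_page(&frags[0], page, len, offset);
	sg_mark_end(&frags[0]);		/* terminate at the last entry in use */
	/* ... crypto_blkcipher_{en,de}crypt_iv(&desc, frags, frags, len) ... */
	sg_init_table(frags, 4);	/* reset before the next batch of fragments */
}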
@@ -314,7 +320,46 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
        desc.desc.flags = 0;
        desc.fragno = 0;
        desc.fraglen = 0;
+
+       sg_init_table(desc.frags, 4);
+
        return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
 }
 
-EXPORT_SYMBOL(gss_decrypt_xdr_buf);
+/*
+ * This function makes the assumption that it was ultimately called
+ * from gss_wrap().
+ *
+ * The client auth_gss code moves any existing tail data into a
+ * separate page before calling gss_wrap.
+ * The server svcauth_gss code ensures that both the head and the
+ * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
+ *
+ * Even with that guarantee, this function may be called more than
+ * once in the processing of gss_wrap().  The best we can do is
+ * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
+ * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
+ * At run-time we can verify that a single invocation of this
+ * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
+ */
+
+int
+xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
+{
+       u8 *p;
+
+       if (shiftlen == 0)
+               return 0;
+
+       BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
+       BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
+
+       p = buf->head[0].iov_base + base;
+
+       memmove(p + shiftlen, p, buf->head[0].iov_len - base);
+
+       buf->head[0].iov_len += shiftlen;
+       buf->len += shiftlen;
+
+       return 0;
+}
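
xdr_extend_head() opens a shiftlen-byte hole at offset base by sliding the rest of the head rightward, which is why its callers must guarantee RPC_MAX_AUTH_SIZE of slack. A toy userspace illustration of the same move (hypothetical, not kernel code):

#include <string.h>

/* Slide the used bytes from offset base rightward by shiftlen,
 * opening a hole at base for the krb5 confounder/token header. */
static void open_hole(char *buf, size_t used, size_t base, size_t shiftlen)
{
	/* memmove() is required: source and destination overlap. */
	memmove(buf + base + shiftlen, buf + base, used - base);
	/* the caller then grows its length accounting: used += shiftlen */
}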