/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

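/*
 * krb5_encrypt - copy length bytes from in to out and encrypt them in
 * place with the given block cipher tfm, using iv as the IV (an all-zero
 * IV when iv is NULL).  length must be a multiple of the cipher block
 * size.
 */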
u32
krb5_encrypt(struct crypto_blkcipher *tfm, void *iv, void *in, void *out,
             int length)
{
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
        struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

        if (length % crypto_blkcipher_blocksize(tfm) != 0)
                goto out;

        if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
                        crypto_blkcipher_ivsize(tfm));
                goto out;
        }

        if (iv)
                memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

        memcpy(out, in, length);
        sg_init_one(sg, out, length);

        ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
out:
        dprintk("RPC: krb5_encrypt returns %d\n", ret);
        return ret;
}

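/*
 * krb5_decrypt - the inverse of krb5_encrypt: copy length bytes from in
 * to out and decrypt them in place with tfm, using iv as the IV (an
 * all-zero IV when iv is NULL).  length must be a multiple of the cipher
 * block size.
 */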
u32
krb5_decrypt(struct crypto_blkcipher *tfm, void *iv, void *in, void *out,
             int length)
{
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
        struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

        if (length % crypto_blkcipher_blocksize(tfm) != 0)
                goto out;

        if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
                        crypto_blkcipher_ivsize(tfm));
                goto out;
        }

        if (iv)
                memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

        memcpy(out, in, length);
        sg_init_one(sg, out, length);

        ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
out:
        dprintk("RPC: gss_k5decrypt returns %d\n", ret);
        return ret;
}

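/*
 * checksummer - callback handed to xdr_process_buf(): feeds each
 * scatterlist fragment of the xdr_buf into the running hash transform.
 */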
static int
checksummer(struct scatterlist *sg, void *data)
{
        struct hash_desc *desc = data;

        return crypto_hash_update(desc, sg, sg->length);
}

/*
 * Checksum the plaintext data and hdrlen bytes of the token header.
 * The checksum is performed over the first 8 bytes of the gss token
 * header and then over the data body.
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
              struct xdr_buf *body, int body_offset, u8 *cksumkey,
              struct xdr_netobj *cksumout)
{
        struct hash_desc desc;
        struct scatterlist sg[1];
        int err;
        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        unsigned int checksumlen;

        if (cksumout->len < kctx->gk5e->cksumlength) {
                dprintk("%s: checksum buffer length, %u, too small for %s\n",
                        __func__, cksumout->len, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }

        desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(desc.tfm))
                return GSS_S_FAILURE;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        checksumlen = crypto_hash_digestsize(desc.tfm);

        if (cksumkey != NULL) {
                err = crypto_hash_setkey(desc.tfm, cksumkey,
                                         kctx->gk5e->keylength);
                if (err)
                        goto out;
        }

        err = crypto_hash_init(&desc);
        if (err)
                goto out;
        sg_init_one(sg, header, hdrlen);
        err = crypto_hash_update(&desc, sg, hdrlen);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
                              checksummer, &desc);
        if (err)
                goto out;
        err = crypto_hash_final(&desc, checksumdata);
        if (err)
                goto out;

        switch (kctx->gk5e->ctype) {
        case CKSUMTYPE_RSA_MD5:
                err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
                                          checksumdata, checksumlen);
                if (err)
                        goto out;
                memcpy(cksumout->data,
                       checksumdata + checksumlen - kctx->gk5e->cksumlength,
                       kctx->gk5e->cksumlength);
                break;
        case CKSUMTYPE_HMAC_SHA1_DES3:
                memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
                break;
        default:
                BUG();
                break;
        }
        cksumout->len = kctx->gk5e->cksumlength;
out:
        crypto_free_hash(desc.tfm);
        return err ? GSS_S_FAILURE : 0;
}

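/*
 * State carried across encryptor() callbacks while walking an xdr_buf:
 * the chaining IV and cipher descriptor, the output buffer, the current
 * offset, the caller-supplied plaintext pages, and up to four input and
 * output scatterlist fragments accumulated so far.
 */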
struct encryptor_desc {
        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
        struct blkcipher_desc desc;
        int pos;
        struct xdr_buf *outbuf;
        struct page **pages;
        struct scatterlist infrags[4];
        struct scatterlist outfrags[4];
        int fragno;
        int fraglen;
};

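/*
 * encryptor - xdr_process_buf() callback that does the real work: it
 * batches scatterlist fragments, substituting the caller's plaintext
 * pages for the page portion of the buffer so that ciphertext lands back
 * in the xdr_buf, encrypts every complete cipher block in place, and
 * carries any sub-block remainder over into the next call.
 */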
static int
encryptor(struct scatterlist *sg, void *data)
{
        struct encryptor_desc *desc = data;
        struct xdr_buf *outbuf = desc->outbuf;
        struct page *in_page;
        int thislen = desc->fraglen + sg->length;
        int fraglen, ret;
        int page_pos;

        /* Worst case is 4 fragments: head, end of page 1, start
         * of page 2, tail.  Anything more is a bug. */
        BUG_ON(desc->fragno > 3);

        page_pos = desc->pos - outbuf->head[0].iov_len;
        if (page_pos >= 0 && page_pos < outbuf->page_len) {
                /* pages are not in place: */
                int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
                in_page = desc->pages[i];
        } else {
                in_page = sg_page(sg);
        }
        sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
                    sg->offset);
        sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
                    sg->offset);
        desc->fragno++;
        desc->fraglen += sg->length;
        desc->pos += sg->length;

        fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
        thislen -= fraglen;

        if (thislen == 0)
                return 0;

        sg_mark_end(&desc->infrags[desc->fragno - 1]);
        sg_mark_end(&desc->outfrags[desc->fragno - 1]);

        ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
                                          desc->infrags, thislen);
        if (ret)
                return ret;

        sg_init_table(desc->infrags, 4);
        sg_init_table(desc->outfrags, 4);

        if (fraglen) {
                sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
                            sg->offset + sg->length - fraglen);
                desc->infrags[0] = desc->outfrags[0];
                sg_assign_page(&desc->infrags[0], in_page);
                desc->fragno = 1;
                desc->fraglen = fraglen;
        } else {
                desc->fragno = 0;
                desc->fraglen = 0;
        }
        return 0;
}

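/*
 * gss_encrypt_xdr_buf - encrypt buf in place from offset to the end,
 * reading plaintext page data from pages, by driving encryptor() through
 * xdr_process_buf().  The region being encrypted must be a whole number
 * of cipher blocks long.
 */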
int
gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
                    int offset, struct page **pages)
{
        int ret;
        struct encryptor_desc desc;

        BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

        memset(desc.iv, 0, sizeof(desc.iv));
        desc.desc.tfm = tfm;
        desc.desc.info = desc.iv;
        desc.desc.flags = 0;
        desc.pos = offset;
        desc.outbuf = buf;
        desc.pages = pages;
        desc.fragno = 0;
        desc.fraglen = 0;

        sg_init_table(desc.infrags, 4);
        sg_init_table(desc.outfrags, 4);

        ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
        return ret;
}

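/*
 * decryptor_desc mirrors encryptor_desc for the decrypt path, but needs
 * only a single set of scatterlist fragments since ciphertext is
 * decrypted in place within the xdr_buf itself.
 */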
struct decryptor_desc {
        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
        struct blkcipher_desc desc;
        struct scatterlist frags[4];
        int fragno;
        int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
        struct decryptor_desc *desc = data;
        int thislen = desc->fraglen + sg->length;
        int fraglen, ret;

        /* Worst case is 4 fragments: head, end of page 1, start
         * of page 2, tail.  Anything more is a bug. */
        BUG_ON(desc->fragno > 3);
        sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
                    sg->offset);
        desc->fragno++;
        desc->fraglen += sg->length;

        fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
        thislen -= fraglen;

        if (thislen == 0)
                return 0;

        sg_mark_end(&desc->frags[desc->fragno - 1]);

        ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
                                          desc->frags, thislen);
        if (ret)
                return ret;

        sg_init_table(desc->frags, 4);

        if (fraglen) {
                sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
                            sg->offset + sg->length - fraglen);
                desc->fragno = 1;
                desc->fraglen = fraglen;
        } else {
                desc->fragno = 0;
                desc->fraglen = 0;
        }
        return 0;
}

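/*
 * gss_decrypt_xdr_buf - decrypt buf in place from offset to the end by
 * driving decryptor() through xdr_process_buf().  As with encryption,
 * the region must be a whole number of cipher blocks long.
 */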
int
gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
                    int offset)
{
        struct decryptor_desc desc;

        BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

        memset(desc.iv, 0, sizeof(desc.iv));
        desc.desc.tfm = tfm;
        desc.desc.info = desc.iv;
        desc.desc.flags = 0;
        desc.fragno = 0;
        desc.fraglen = 0;

        sg_init_table(desc.frags, 4);

        return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */
static int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
        u8 *p;

        BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
        BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

        p = buf->head[0].iov_base + base;
        memmove(p + shiftlen, p, buf->head[0].iov_len - base);

        buf->head[0].iov_len += shiftlen;
        buf->len += shiftlen;

        return 0;
}
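
/*
 * A rough usage sketch (not taken verbatim from this file): a caller in
 * the gss_wrap() path that needs room in the head for the krb5 token
 * header and checksum ahead of the payload might do something like
 *
 *	err = xdr_extend_head(buf, offset,
 *			      GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength);
 *	if (err)
 *		return GSS_S_FAILURE;
 *
 * after which the newly opened gap at buf->head[0].iov_base + base is
 * free to be filled in with the token.
 */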