2 * linux/net/sunrpc/gss_krb5_crypto.c
4 * Copyright (c) 2000-2008 The Regents of the University of Michigan.
7 * Andy Adamson <andros@umich.edu>
8 * Bruce Fields <bfields@umich.edu>
12 * Copyright (C) 1998 by the FundsXpress, INC.
14 * All rights reserved.
16 * Export of this software from the United States of America may require
17 * a specific license from the United States Government. It is the
18 * responsibility of any person or organization contemplating export to
19 * obtain such a license before exporting.
21 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
22 * distribute this software and its documentation for any purpose and
23 * without fee is hereby granted, provided that the above copyright
24 * notice appear in all copies and that both that copyright notice and
25 * this permission notice appear in supporting documentation, and that
26 * the name of FundsXpress. not be used in advertising or publicity pertaining
27 * to distribution of the software without specific, written prior
28 * permission. FundsXpress makes no representations about the suitability of
29 * this software for any purpose. It is provided "as is" without express
30 * or implied warranty.
32 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
33 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
34 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
37 #include <linux/err.h>
38 #include <linux/types.h>
40 #include <linux/scatterlist.h>
41 #include <linux/crypto.h>
42 #include <linux/highmem.h>
43 #include <linux/pagemap.h>
44 #include <linux/random.h>
45 #include <linux/sunrpc/gss_krb5.h>
46 #include <linux/sunrpc/xdr.h>
49 # define RPCDBG_FACILITY RPCDBG_AUTH
/*
 * krb5_encrypt: CBC-encrypt 'length' bytes from 'in' into 'out' using
 * the supplied cipher transform.  The function-name/return-type line and
 * several interior lines (error returns, braces) are elided from this
 * extract; the lines below are preserved verbatim, comments only added.
 */
54 struct crypto_blkcipher *tfm,
/* single scatterlist entry covering the contiguous output buffer */
61 struct scatterlist sg[1];
/* IV defaults to all zeroes when the caller passes iv == NULL
 * (the NULL check guarding the memcpy below is elided here) */
62 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
63 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
/* CBC requires the plaintext to be a whole number of cipher blocks */
65 if (length % crypto_blkcipher_blocksize(tfm) != 0)
/* local_iv is a fixed-size buffer; refuse transforms whose IV
 * would overflow it */
68 if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
69 dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
70 crypto_blkcipher_ivsize(tfm));
75 memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
/* encrypt in place in 'out': copy plaintext there first */
77 memcpy(out, in, length);
78 sg_init_one(sg, out, length);
80 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
82 dprintk("RPC:       krb5_encrypt returns %d\n", ret);
/*
 * krb5_decrypt: CBC-decrypt 'length' bytes from 'in' into 'out' --
 * mirror image of krb5_encrypt above.  Function-name line and several
 * interior lines are elided from this extract; remaining lines are
 * preserved verbatim, comments only added.
 */
88 struct crypto_blkcipher *tfm,
95 struct scatterlist sg[1];
/* IV defaults to all zeroes when the caller passes iv == NULL */
96 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
97 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
/* ciphertext must be a whole number of cipher blocks */
99 if (length % crypto_blkcipher_blocksize(tfm) != 0)
102 if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
103 dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
104 crypto_blkcipher_ivsize(tfm));
108 memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
/* decrypt in place in 'out' */
110 memcpy(out, in, length);
111 sg_init_one(sg, out, length);
113 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
115 dprintk("RPC:       gss_k5decrypt returns %d\n",ret);
/*
 * checksummer: xdr_process_buf() callback that folds one scatterlist
 * fragment into a running hash.  'data' is the caller's hash_desc.
 * (Return-type line and braces elided from this extract.)
 */
120 checksummer(struct scatterlist *sg, void *data)
122 struct hash_desc *desc = data;
124 return crypto_hash_update(desc, sg, sg->length);
128 * checksum the plaintext data and hdrlen bytes of the token header
129 * The checksum is performed over the first 8 bytes of the
130 * gss token header and then over the data body
/*
 * make_checksum: compute the pre-RFC4121 ("v1") token checksum over
 * 'hdrlen' bytes of the token header followed by the xdr_buf body
 * starting at 'body_offset'.  Result (possibly truncated to the
 * enctype's cksumlength) is written into 'cksumout'.
 * Returns 0 on success, GSS_S_FAILURE on any error.
 * NOTE(review): several error-path lines (out_free gotos, braces,
 * remaining switch cases) are elided from this extract.
 */
133 make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
134 struct xdr_buf *body, int body_offset, u8 *cksumkey,
135 struct xdr_netobj *cksumout)
137 struct hash_desc desc;
138 struct scatterlist sg[1];
140 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
141 unsigned int checksumlen;
/* caller's buffer must have room for the enctype's checksum */
143 if (cksumout->len < kctx->gk5e->cksumlength) {
144 dprintk("%s: checksum buffer length, %u, too small for %s\n",
145 __func__, cksumout->len, kctx->gk5e->name);
146 return GSS_S_FAILURE;
/* hash transform is named per-enctype in the krb5 enctype table */
149 desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
150 if (IS_ERR(desc.tfm))
151 return GSS_S_FAILURE;
152 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
154 checksumlen = crypto_hash_digestsize(desc.tfm);
/* keyed checksum (e.g. HMAC) only when a key was supplied */
156 if (cksumkey != NULL) {
157 err = crypto_hash_setkey(desc.tfm, cksumkey,
158 kctx->gk5e->keylength);
163 err = crypto_hash_init(&desc);
/* v1 order: token header first, then the message body */
166 sg_init_one(sg, header, hdrlen);
167 err = crypto_hash_update(&desc, sg, hdrlen);
170 err = xdr_process_buf(body, body_offset, body->len - body_offset,
174 err = crypto_hash_final(&desc, checksumdata);
/* post-process the raw digest per checksum type */
178 switch (kctx->gk5e->ctype) {
179 case CKSUMTYPE_RSA_MD5:
/* DES-encrypt the MD5 digest, then keep the trailing
 * cksumlength bytes */
180 err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
181 checksumdata, checksumlen);
184 memcpy(cksumout->data,
185 checksumdata + checksumlen - kctx->gk5e->cksumlength,
186 kctx->gk5e->cksumlength);
188 case CKSUMTYPE_HMAC_SHA1_DES3:
/* truncate the HMAC-SHA1 digest to cksumlength bytes */
189 memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
195 cksumout->len = kctx->gk5e->cksumlength;
197 crypto_free_hash(desc.tfm);
198 return err ? GSS_S_FAILURE : 0;
202 * checksum the plaintext data and hdrlen bytes of the token header
203 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
204 * body then over the first 16 octets of the MIC token
205 * Inclusion of the header data in the calculation of the
206 * checksum is optional.
/*
 * make_checksum_v2: RFC 4121 sec. 4.2.4 checksum -- hash the message
 * body first, then (optionally) 'hdrlen' bytes of the token header.
 * Requires a keyed checksum type and a key; result is truncated to the
 * enctype's cksumlength and stored in 'cksumout'.
 * Returns 0 on success, GSS_S_FAILURE on any error.
 * NOTE(review): error-path gotos, braces and the default switch case
 * are elided from this extract; lines below are verbatim.
 */
209 make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
210 struct xdr_buf *body, int body_offset, u8 *cksumkey,
211 struct xdr_netobj *cksumout)
213 struct hash_desc desc;
214 struct scatterlist sg[1];
216 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
217 unsigned int checksumlen;
/* v2 checksums are always keyed -- reject unkeyed enctypes */
219 if (kctx->gk5e->keyed_cksum == 0) {
220 dprintk("%s: expected keyed hash for %s\n",
221 __func__, kctx->gk5e->name);
222 return GSS_S_FAILURE;
224 if (cksumkey == NULL) {
225 dprintk("%s: no key supplied for %s\n",
226 __func__, kctx->gk5e->name);
227 return GSS_S_FAILURE;
230 desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
232 if (IS_ERR(desc.tfm))
233 return GSS_S_FAILURE;
234 checksumlen = crypto_hash_digestsize(desc.tfm);
235 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
237 err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
241 err = crypto_hash_init(&desc);
/* v2 order (opposite of v1): body first ... */
244 err = xdr_process_buf(body, body_offset, body->len - body_offset,
/* ... then the token header, which is optional here */
248 if (header != NULL) {
249 sg_init_one(sg, header, hdrlen);
250 err = crypto_hash_update(&desc, sg, hdrlen);
254 err = crypto_hash_final(&desc, checksumdata);
258 cksumout->len = kctx->gk5e->cksumlength;
260 switch (kctx->gk5e->ctype) {
261 case CKSUMTYPE_HMAC_SHA1_96_AES128:
262 case CKSUMTYPE_HMAC_SHA1_96_AES256:
263 /* note that this truncates the hash */
264 memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
271 crypto_free_hash(desc.tfm);
272 return err ? GSS_S_FAILURE : 0;
/*
 * Per-call state threaded through the encryptor() callback by
 * gss_encrypt_xdr_buf().  (Some member lines -- pos, pages, fragno,
 * fraglen -- are elided from this extract.)
 */
275 struct encryptor_desc {
/* running CBC IV, carried across callback invocations */
276 u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
277 struct blkcipher_desc desc;
/* destination buffer whose pages receive the ciphertext */
279 struct xdr_buf *outbuf;
/* up to 4 fragments batched per encrypt call:
 * head, end of page 1, start of page 2, tail */
281 struct scatterlist infrags[4];
282 struct scatterlist outfrags[4];
/*
 * encryptor: xdr_process_buf() callback.  Batches scatterlist
 * fragments until a whole number of cipher blocks is available, then
 * CBC-encrypts them, reading plaintext from the caller-supplied page
 * cache pages (desc->pages) where the fragment falls in the page data,
 * and writing ciphertext into the outbuf pages in place.
 * Any sub-block remainder is carried over into fragment 0 for the next
 * invocation.  (Return-type line and several interior lines elided.)
 */
288 encryptor(struct scatterlist *sg, void *data)
290 struct encryptor_desc *desc = data;
291 struct xdr_buf *outbuf = desc->outbuf;
292 struct page *in_page;
293 int thislen = desc->fraglen + sg->length;
297 /* Worst case is 4 fragments: head, end of page 1, start
298 * of page 2, tail. Anything more is a bug. */
299 BUG_ON(desc->fragno > 3);
/* position of this fragment relative to the page-data section */
301 page_pos = desc->pos - outbuf->head[0].iov_len;
302 if (page_pos >= 0 && page_pos < outbuf->page_len) {
303 /* pages are not in place: */
304 int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
305 in_page = desc->pages[i];
/* head/tail fragments: read and write the same page */
307 in_page = sg_page(sg);
309 sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
311 sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
314 desc->fraglen += sg->length;
315 desc->pos += sg->length;
/* bytes beyond the last whole cipher block (blocksize is a
 * power of two, so masking computes the remainder) */
317 fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
323 sg_mark_end(&desc->infrags[desc->fragno - 1]);
324 sg_mark_end(&desc->outfrags[desc->fragno - 1]);
326 ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
327 desc->infrags, thislen);
/* reset the fragment tables for the next batch */
331 sg_init_table(desc->infrags, 4);
332 sg_init_table(desc->outfrags, 4);
/* carry the sub-block tail of this fragment into slot 0 */
335 sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
336 sg->offset + sg->length - fraglen);
337 desc->infrags[0] = desc->outfrags[0];
338 sg_assign_page(&desc->infrags[0], in_page);
340 desc->fraglen = fraglen;
/*
 * gss_encrypt_xdr_buf: CBC-encrypt buf's contents from 'offset' to the
 * end in place (zero IV), pulling plaintext page data from 'pages'.
 * Data length past 'offset' must be block-aligned.
 * (Several setup lines -- desc.desc.tfm/flags, pos/fragno/fraglen
 * initialization -- are elided from this extract.)
 */
349 gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
350 int offset, struct page **pages)
353 struct encryptor_desc desc;
355 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
/* start CBC from an all-zero IV */
357 memset(desc.iv, 0, sizeof(desc.iv));
359 desc.desc.info = desc.iv;
367 sg_init_table(desc.infrags, 4);
368 sg_init_table(desc.outfrags, 4);
/* walk the xdr_buf; encryptor() does the actual cipher work */
370 ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
/*
 * Per-call state for decryptor(); in-place decryption needs only one
 * fragment table.  (fragno/fraglen member lines elided from this
 * extract.)
 */
374 struct decryptor_desc {
/* running CBC IV, carried across callback invocations */
375 u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
376 struct blkcipher_desc desc;
377 struct scatterlist frags[4];
/*
 * decryptor: xdr_process_buf() callback mirroring encryptor(), but
 * strictly in place -- source and destination scatterlists are the
 * same.  Batches fragments to whole cipher blocks, decrypts, and
 * carries any sub-block remainder into the next invocation.
 * (Return-type line and several interior lines elided.)
 */
383 decryptor(struct scatterlist *sg, void *data)
385 struct decryptor_desc *desc = data;
386 int thislen = desc->fraglen + sg->length;
389 /* Worst case is 4 fragments: head, end of page 1, start
390 * of page 2, tail. Anything more is a bug. */
391 BUG_ON(desc->fragno > 3);
392 sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
395 desc->fraglen += sg->length;
/* remainder past the last whole cipher block */
397 fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
403 sg_mark_end(&desc->frags[desc->fragno - 1]);
405 ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
406 desc->frags, thislen);
/* reset fragment table for the next batch */
410 sg_init_table(desc->frags, 4);
/* carry sub-block tail of this fragment into slot 0 */
413 sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
414 sg->offset + sg->length - fraglen);
416 desc->fraglen = fraglen;
/*
 * gss_decrypt_xdr_buf: CBC-decrypt buf's contents from 'offset' to the
 * end in place, starting from a zero IV.  Data length past 'offset'
 * must be block-aligned.  (desc.desc.tfm/flags and fragno/fraglen
 * initialization lines elided from this extract.)
 */
425 gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
428 struct decryptor_desc desc;
431 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
/* start CBC from an all-zero IV */
433 memset(desc.iv, 0, sizeof(desc.iv));
435 desc.desc.info = desc.iv;
440 sg_init_table(desc.frags, 4);
442 return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
446 * This function makes the assumption that it was ultimately called
449 * The client auth_gss code moves any existing tail data into a
450 * separate page before calling gss_wrap.
451 * The server svcauth_gss code ensures that both the head and the
452 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
454 * Even with that guarantee, this function may be called more than
455 * once in the processing of gss_wrap(). The best we can do is
456 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
457 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
458 * At run-time we can verify that a single invocation of this
459 * function doesn't attempt to use more the RPC_MAX_AUTH_SIZE.
/*
 * xdr_extend_head: open up 'shiftlen' bytes of space at 'base' within
 * buf's head iovec by shifting the existing head data right; both the
 * head length and total buf length grow by shiftlen.  Relies on the
 * RPC_MAX_AUTH_SIZE slack guarantee described in the comment above.
 * (Declaration lines and the shiftlen==0 early return are elided from
 * this extract.)
 */
463 xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
/* compile-time proof that the largest possible shift fits the slack */
470 BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
471 BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
473 p = buf->head[0].iov_base + base;
/* memmove: source and destination overlap by construction */
475 memmove(p + shiftlen, p, buf->head[0].iov_len - base);
477 buf->head[0].iov_len += shiftlen;
478 buf->len += shiftlen;
/*
 * gss_krb5_cts_crypt: encrypt or decrypt the final (at most two
 * cipher blocks of) data of a CTS (ciphertext-stealing) operation.
 * The tail bytes are copied out of the xdr_buf into a small stack
 * buffer, run through the cipher with the caller's chained IV, and
 * written back.  'encrypt' selects direction.
 * (Error-path gotos and the trailing out:/return lines are elided
 * from this extract.)
 */
484 gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
485 u32 offset, u8 *iv, struct page **pages, int encrypt)
488 struct scatterlist sg[1];
489 struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
/* VLA sized to two cipher blocks -- the CTS tail maximum */
490 u8 data[crypto_blkcipher_blocksize(cipher) * 2];
491 struct page **save_pages;
492 u32 len = buf->len - offset;
494 BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2);
497 * For encryption, we want to read from the cleartext
498 * page cache pages, and write the encrypted data to
499 * the supplied xdr_buf pages.
501 save_pages = buf->pages;
/* temporarily point buf->pages at the plaintext source while
 * reading, then restore (the pages swap line is elided here) */
505 ret = read_bytes_from_xdr_buf(buf, offset, data, len);
506 buf->pages = save_pages;
510 sg_init_one(sg, data, len);
513 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
515 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);
520 ret = write_bytes_to_xdr_buf(buf, offset, data, len);
/*
 * gss_krb5_aes_encrypt: RFC 4121 wrap for the AES enctypes.
 * Layout built here: [confounder | plaintext | 'X' filler (ec bytes) |
 * copy of token header | HMAC].  The bulk of the data is CBC-encrypted
 * via the aux cipher, and the final two blocks via CTS
 * (gss_krb5_cts_crypt) so the ciphertext length equals the plaintext
 * length.  The checksum is computed over the plaintext before
 * encryption, using the caller's real page-cache pages.
 * NOTE(review): many interior lines (declarations, error branches,
 * the pages swap, final return) are elided from this extract; lines
 * below are verbatim, comments only added.
 */
527 gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
528 struct xdr_buf *buf, int ec, struct page **pages)
531 struct xdr_netobj hmac;
534 struct crypto_blkcipher *cipher, *aux_cipher;
536 struct page **save_pages;
538 struct encryptor_desc desc;
/* key/cipher selection depends on which side of the context we are */
541 if (kctx->initiate) {
542 cipher = kctx->initiator_enc;
543 aux_cipher = kctx->initiator_enc_aux;
544 cksumkey = kctx->initiator_integ;
546 cipher = kctx->acceptor_enc;
547 aux_cipher = kctx->acceptor_enc_aux;
548 cksumkey = kctx->acceptor_integ;
550 blocksize = crypto_blkcipher_blocksize(cipher);
552 /* hide the gss token header and insert the confounder */
553 offset += GSS_KRB5_TOK_HDR_LEN;
554 if (xdr_extend_head(buf, offset, blocksize))
555 return GSS_S_FAILURE;
/* one block of random confounder precedes the plaintext */
556 gss_krb5_make_confounder(buf->head[0].iov_base + offset, blocksize);
557 offset -= GSS_KRB5_TOK_HDR_LEN;
/* locate (or create) the tail so the filler, header copy and
 * HMAC can be appended after the payload */
559 if (buf->tail[0].iov_base != NULL) {
560 ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
562 buf->tail[0].iov_base = buf->head[0].iov_base
563 + buf->head[0].iov_len;
564 buf->tail[0].iov_len = 0;
565 ecptr = buf->tail[0].iov_base;
/* 'ec' bytes of filler, per RFC 4121 EC field */
568 memset(ecptr, 'X', ec);
569 buf->tail[0].iov_len += ec;
572 /* copy plaintext gss token header after filler (if any) */
573 memcpy(ecptr + ec, buf->head[0].iov_base + offset,
574 GSS_KRB5_TOK_HDR_LEN);
575 buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
576 buf->len += GSS_KRB5_TOK_HDR_LEN;
/* HMAC lands directly after the copied header in the tail */
579 hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
580 hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
583 * When we are called, pages points to the real page cache
584 * data -- which we can't go and encrypt! buf->pages points
585 * to scratch pages which we are going to send off to the
586 * client/server. Swap in the plaintext pages to calculate
589 save_pages = buf->pages;
/* (the buf->pages = pages swap line is elided here) */
592 err = make_checksum_v2(kctx, NULL, 0, buf,
593 offset + GSS_KRB5_TOK_HDR_LEN, cksumkey, &hmac);
594 buf->pages = save_pages;
596 return GSS_S_FAILURE;
598 nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
599 nblocks = (nbytes + blocksize - 1) / blocksize;
/* everything except the last two blocks goes through plain CBC;
 * the last two go through CTS below */
602 cbcbytes = (nblocks - 2) * blocksize;
604 memset(desc.iv, 0, sizeof(desc.iv));
607 desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
612 desc.desc.info = desc.iv;
614 desc.desc.tfm = aux_cipher;
616 sg_init_table(desc.infrags, 4);
617 sg_init_table(desc.outfrags, 4);
619 err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
620 cbcbytes, encryptor, &desc);
625 /* Make sure IV carries forward from any CBC results. */
626 err = gss_krb5_cts_crypt(cipher, buf,
627 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
634 /* Now update buf to account for HMAC */
635 buf->tail[0].iov_len += kctx->gk5e->cksumlength;
636 buf->len += kctx->gk5e->cksumlength;
/*
 * gss_krb5_aes_decrypt: RFC 4121 unwrap for the AES enctypes --
 * inverse of gss_krb5_aes_encrypt above.  Decrypts the bulk via CBC
 * (aux cipher) plus a CTS finish, recomputes the HMAC over the
 * recovered plaintext, and compares it against the checksum carried at
 * the end of the packet.  On success *headskip/*tailskip tell the
 * caller how many leading (confounder) and trailing (checksum) bytes
 * to discard.
 * NOTE(review): this extract is truncated -- declarations, error
 * branches and the function's final return are missing; lines below
 * are verbatim, comments only added.
 */
645 gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
646 u32 *headskip, u32 *tailskip)
648 struct xdr_buf subbuf;
651 struct crypto_blkcipher *cipher, *aux_cipher;
652 struct xdr_netobj our_hmac_obj;
653 u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
654 u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
655 int nblocks, blocksize, cbcbytes;
656 struct decryptor_desc desc;
/* mirror of the encrypt side: use the peer's keys */
658 if (kctx->initiate) {
659 cipher = kctx->acceptor_enc;
660 aux_cipher = kctx->acceptor_enc_aux;
661 cksum_key = kctx->acceptor_integ;
663 cipher = kctx->initiator_enc;
664 aux_cipher = kctx->initiator_enc_aux;
665 cksum_key = kctx->initiator_integ;
667 blocksize = crypto_blkcipher_blocksize(cipher);
670 /* create a segment skipping the header and leaving out the checksum */
671 xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
672 (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
673 kctx->gk5e->cksumlength));
675 nblocks = (subbuf.len + blocksize - 1) / blocksize;
/* all but the last two blocks are plain CBC; the rest is CTS */
679 cbcbytes = (nblocks - 2) * blocksize;
681 memset(desc.iv, 0, sizeof(desc.iv));
686 desc.desc.info = desc.iv;
688 desc.desc.tfm = aux_cipher;
690 sg_init_table(desc.frags, 4);
692 ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
697 /* Make sure IV carries forward from any CBC results. */
698 ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
703 /* Calculate our hmac over the plaintext data */
704 our_hmac_obj.len = sizeof(our_hmac);
705 our_hmac_obj.data = our_hmac;
707 ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
708 cksum_key, &our_hmac_obj);
712 /* Get the packet's hmac value */
713 ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
714 pkt_hmac, kctx->gk5e->cksumlength);
/* integrity check: recomputed vs. transmitted checksum */
718 if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
/* confounder occupies one cipher block at the front */
722 *headskip = crypto_blkcipher_blocksize(cipher);
723 *tailskip = kctx->gk5e->cksumlength;
725 if (ret && ret != GSS_S_BAD_SIG)