/*
 * The Regents of the University of Michigan
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use or distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif
static inline int
gss_krb5_padding(int blocksize, int length)
{
	/* Always returns at least 1 and at most blocksize bytes. */
	return blocksize - (length % blocksize);
}
static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}
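
/*
 * Worked example of the padding rule above (an illustrative sketch,
 * not built): every pad byte holds the pad length, and an already
 * aligned buffer still gains a full block, so the receiver can always
 * read the amount of padding out of the last byte.
 */
#if 0
	int pad;

	pad = gss_krb5_padding(8, 13);	/* 3: pads 13 bytes up to 16 */
	pad = gss_krb5_padding(8, 16);	/* 8: a whole extra block */
#endif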
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	buf->len -= pad;
	return 0;
}
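
/*
 * Minimal round-trip sketch (illustrative only, not built; "data" is
 * an assumed local buffer with slack for one block of padding): pad a
 * head-only xdr_buf and then strip the padding again.
 */
#if 0
	struct xdr_buf b = { .len = 13 };
	char data[32] = "thirteen byte";

	b.head[0].iov_base = data;
	b.head[0].iov_len = 13;

	gss_krb5_add_padding(&b, 0, 8);			/* b.len == 16 */
	WARN_ON(gss_krb5_remove_padding(&b, 8));	/* b.len == 13 */
#endif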
static void
make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = random32();
		i = (i << 32) | random32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}
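
/*
 * For comparison, a strictly unique confounder would look something
 * like the hypothetical sketch below: an atomic64_t removes the
 * atomicity worry the comment above declines to pay for (rollover is
 * still ignored).
 */
#if 0
	static atomic64_t confounder_ctr;
	u64 c = atomic64_inc_return(&confounder_ctr);

	memcpy(p, &c, min_t(u32, conflen, sizeof(c)));
#endif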
/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;

	dprintk("RPC:       %s\n", __func__);

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = blocksize + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);
	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);

	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	*(__be16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	memset(ptr + 4, 0xff, 4);
	*(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
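
	/*
	 * Header as laid out above, per rfc 1964, section 1.2.1 (the
	 * checksum, then the confounder and ciphertext, follow it):
	 *
	 *	ptr[0..1]	TOK_ID		0x02 0x01 (KG_TOK_WRAP_MSG)
	 *	ptr[2..3]	SGN_ALG		little-endian
	 *	ptr[4..5]	SEAL_ALG	little-endian
	 *	ptr[6..7]	filler		0xff 0xff
	 */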
	make_confounder(msg_start, blocksize);
	/* XXXJBF: UGH!: compute the checksum over the plaintext pages
	 * handed in by the caller (buf->pages may already point at the
	 * scratch pages that will receive the ciphertext). */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum((char *)kctx->gk5e->cksum_name, ptr, 8, buf,
				offset + headlen - blocksize, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			 md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;
	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);
	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
									pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	int			signalg;
	int			sealalg;
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;
	u32			crypt_offset;

	dprintk("RPC:       gss_unwrap_kerberos\n");
	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;
	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;
	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the beginning of the token header
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;
	if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if (make_checksum((char *)kctx->gk5e->cksum_name, ptr, 8, buf,
				crypt_offset, &md5cksum))
		return GSS_S_FAILURE;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			 md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
		return GSS_S_BAD_SIG;
	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;
	/* do sequencing checks */

	if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
			     &direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;
	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					blocksize;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);
	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}
u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	}
}
u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
		return gss_unwrap_kerberos_v1(kctx, offset, buf);
	}
}
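
/*
 * Usage sketch (illustrative only, not built): these entry points are
 * normally reached through the gss mech switch rather than called
 * directly; "ctx" is assumed to be an established context and "buf"
 * an RPC message prepared by the rpcsec_gss code.
 */
#if 0
	struct gss_ctx *ctx;
	struct xdr_buf *buf;
	struct page **pages;
	u32 maj_stat;

	maj_stat = gss_wrap_kerberos(ctx, 0, buf, pages);
	if (maj_stat == GSS_S_COMPLETE)
		maj_stat = gss_unwrap_kerberos(ctx, 0, buf);
#endif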