#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

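/*
 * The pad bytes added below are self-describing: each pad byte holds the
 * pad length (1..blocksize), so gss_krb5_remove_padding() can recover the
 * count from the last byte of the wrapped data.
 */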
static void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

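	/*
	 * The pad count is in the very last byte of the wrapped data, which
	 * may fall in the head, in the page data, or in the tail; find it.
	 */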
	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;

	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;

	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	buf->len -= pad;
	return 0;
}

static void
make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = random32();
		i = (i << 32) | random32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */
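
/*
 * Layout of the v1 wrap token built below (after the mech-OID framing
 * emitted by g_make_token_header()):
 *
 *	TOK_ID (2) | SGN_ALG (2) | SEAL_ALG (2) | filler 0xff (2) |
 *	SND_SEQ (8) | SGN_CKSUM (8) | confounder (blocksize) |
 *	padded plaintext
 *
 * Everything from the confounder onward is encrypted; SND_SEQ is
 * encrypted separately, using the checksum as its IV.
 */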
static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;

	dprintk("RPC: gss_wrap_kerberos\n");

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = blocksize + buf->len - offset;
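
	/*
	 * plainlen counts the confounder block plus the padded payload.
	 * g_token_size() below returns the size of the complete token for
	 * that much data, so subtracting what is already in the buffer
	 * past 'offset' leaves the header room we must open up in front.
	 */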
	headlen = g_token_size(&kctx->mech_used, 24 + plainlen) -
						(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr);

	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + 24;

	*(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
	memset(ptr + 4, 0xff, 4);
	*(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES);

	make_confounder(msg_start, blocksize);
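
	/*
	 * The checksum must cover the plaintext.  The caller may already
	 * have pointed buf->pages at scratch pages destined for ciphertext
	 * and handed us the plaintext pages in 'pages', so swap the
	 * plaintext in just long enough to checksum it.
	 */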
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum("md5", ptr, 8, buf,
				offset + headlen - blocksize, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			  md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;
	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);
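
	/*
	 * kctx->seq_send is shared by all concurrent wraps on this context;
	 * take krb5_seq_lock so every token gets a distinct sequence number.
	 */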
	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if (krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
		return GSS_S_FAILURE;
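
	/*
	 * Encrypt everything from the confounder to the end of the buffer;
	 * the page portion is read from the plaintext 'pages' while the
	 * ciphertext lands in buf->pages.
	 */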
	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
									pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	int			signalg, sealalg;
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	s32			now;
	int			direction;
	u32			seqnum;
	unsigned char		*ptr;
	int			bodysize, data_len, blocksize;
	void			*data_start, *orig_start;

	dprintk("RPC: gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != SGN_ALG_DES_MAC_MD5)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != SEAL_ALG_DES)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;
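
	/*
	 * Everything from the confounder onward was encrypted in one pass;
	 * decrypt it in place before checking the MD5 checksum against the
	 * SGN_CKSUM field of the token header.
	 */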
	if (gss_decrypt_xdr_buf(kctx->enc, buf,
			ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base))
		return GSS_S_DEFECTIVE_TOKEN;

	if (make_checksum("md5", ptr, 8, buf,
			ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
		return GSS_S_FAILURE;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			  md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
				    &direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and decrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}

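/*
 * Entry points called through the gss_api mechanism ops: dispatch on the
 * context's encryption type.  Only the original DES-based v1 token format
 * is handled here.
 */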
static u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	}
}

static u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
		return gss_unwrap_kerberos_v1(kctx, offset, buf);
	}
}