svcrdma: Use RPC reply map for RDMA_WRITE processing
net/sunrpc/xprtrdma/svc_rdma_sendto.c
/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT

/* Encode an XDR as an array of IB SGE
 *
 * Assumptions:
 * - head[0] is physically contiguous.
 * - tail[0] is physically contiguous.
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * SGE[0]              reserved for RPCRDMA header
 * SGE[1]              data from xdr->head[]
 * SGE[2..sge_count-2] data from xdr->pages[]
 * SGE[sge_count-1]    data from xdr->tail.
 *
 * The max SGE we need is the length of the XDR / pagesize + one for
 * head + one for tail + one for RPCRDMA header. Since RPCSVC_MAXPAGES
 * reserves a page for both the request and the reply header, and this
 * array is only concerned with the reply, we are assured that we have
 * one extra page for the RPCRDMA header.
 */
static void xdr_to_sge(struct svcxprt_rdma *xprt,
                       struct xdr_buf *xdr,
                       struct svc_rdma_req_map *vec)
{
        int sge_max = (xdr->len+PAGE_SIZE-1) / PAGE_SIZE + 3;
        int sge_no;
        u32 sge_bytes;
        u32 page_bytes;
        u32 page_off;
        int page_no;

        BUG_ON(xdr->len !=
               (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));

        /* Skip the first sge, this is for the RPCRDMA header */
        sge_no = 1;

        /* Head SGE */
        vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
        vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
        sge_no++;

        /* pages SGE */
        page_no = 0;
        page_bytes = xdr->page_len;
        page_off = xdr->page_base;
        while (page_bytes) {
                vec->sge[sge_no].iov_base =
                        page_address(xdr->pages[page_no]) + page_off;
                sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
                page_bytes -= sge_bytes;
                vec->sge[sge_no].iov_len = sge_bytes;

                sge_no++;
                page_no++;
                page_off = 0; /* reset for next time through loop */
        }

        /* Tail SGE */
        if (xdr->tail[0].iov_len) {
                vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
                vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
                sge_no++;
        }

        BUG_ON(sge_no > sge_max);
        vec->count = sge_no;
}
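
/* Worked example (hypothetical sizes): a reply with a 120-byte head,
 * 8192 bytes of page data at page_base 0, and a 4-byte tail maps to
 *   vec->sge[1] = head     (120 bytes)
 *   vec->sge[2] = pages[0] (4096 bytes)
 *   vec->sge[3] = pages[1] (4096 bytes)
 *   vec->sge[4] = tail     (4 bytes)
 * leaving vec->count = 5 and vec->sge[0] free for the RPCRDMA header.
 */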

/* Assumptions:
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                      u32 rmr, u64 to,
                      u32 xdr_off, int write_len,
                      struct svc_rdma_req_map *vec)
{
        struct ib_send_wr write_wr;
        struct ib_sge *sge;
        int xdr_sge_no;
        int sge_no;
        int sge_bytes;
        int sge_off;
        int bc;
        struct svc_rdma_op_ctxt *ctxt;

        BUG_ON(vec->count > RPCSVC_MAXPAGES);
        dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
                "write_len=%d, vec->sge=%p, vec->count=%lu\n",
                rmr, (unsigned long long)to, xdr_off,
                write_len, vec->sge, vec->count);

        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_TO_DEVICE;
        sge = ctxt->sge;

        /* Find the SGE associated with xdr_off */
        for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
             xdr_sge_no++) {
                if (vec->sge[xdr_sge_no].iov_len > bc)
                        break;
                bc -= vec->sge[xdr_sge_no].iov_len;
        }

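        /* bc is now the byte offset of xdr_off within sge[xdr_sge_no];
         * carry it into sge_off as the starting offset for the copy
         * loop below, and reuse bc to count down the write length.
         */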
        sge_off = bc;
        bc = write_len;
        sge_no = 0;

        /* Copy the remaining SGE */
        while (bc != 0 && xdr_sge_no < vec->count) {
                sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
                sge_bytes = min((size_t)bc,
                                (size_t)(vec->sge[xdr_sge_no].iov_len-sge_off));
                sge[sge_no].length = sge_bytes;
                sge[sge_no].addr =
                        ib_dma_map_single(xprt->sc_cm_id->device,
                                          (void *)
                                          vec->sge[xdr_sge_no].iov_base + sge_off,
                                          sge_bytes, DMA_TO_DEVICE);
                if (dma_mapping_error(sge[sge_no].addr))
                        goto err;
                sge_off = 0;
                sge_no++;
                ctxt->count++;
                xdr_sge_no++;
                bc -= sge_bytes;
        }

        BUG_ON(bc != 0);
        BUG_ON(xdr_sge_no > vec->count);

        /* Prepare WRITE WR */
        memset(&write_wr, 0, sizeof write_wr);
        ctxt->wr_op = IB_WR_RDMA_WRITE;
        write_wr.wr_id = (unsigned long)ctxt;
        write_wr.sg_list = &sge[0];
        write_wr.num_sge = sge_no;
        write_wr.opcode = IB_WR_RDMA_WRITE;
        write_wr.send_flags = IB_SEND_SIGNALED;
        write_wr.wr.rdma.rkey = rmr;
        write_wr.wr.rdma.remote_addr = to;

        /* Post It */
        atomic_inc(&rdma_stat_write);
        if (svc_rdma_send(xprt, &write_wr))
                goto err;
        return 0;
 err:
        svc_rdma_put_context(ctxt, 0);
        /* Fatal error, close transport */
        return -EIO;
}

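/* Send the write-list data for the reply and encode the response
 * write list.
 *
 * Worked example (hypothetical sizes): with sc_max_sge = 4 and
 * PAGE_SIZE = 4096, max_write below is 16384, so a 20000-byte write
 * chunk goes out as two RDMA_WRITEs of 16384 and 3616 bytes, each
 * posted by send_write() at consecutive offsets into the client's
 * chunk.
 */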
static int send_write_chunks(struct svcxprt_rdma *xprt,
                             struct rpcrdma_msg *rdma_argp,
                             struct rpcrdma_msg *rdma_resp,
                             struct svc_rqst *rqstp,
                             struct svc_rdma_req_map *vec)
{
        u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
        int write_len;
        int max_write;
        u32 xdr_off;
        int chunk_off;
        int chunk_no;
        struct rpcrdma_write_array *arg_ary;
        struct rpcrdma_write_array *res_ary;
        int ret;

        arg_ary = svc_rdma_get_write_array(rdma_argp);
        if (!arg_ary)
                return 0;
        res_ary = (struct rpcrdma_write_array *)
                &rdma_resp->rm_body.rm_chunks[1];

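        /* The largest single RDMA_WRITE we can post, bounded by the
         * device's scatter/gather limit.
         */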
        max_write = xprt->sc_max_sge * PAGE_SIZE;

        /* Write chunks start at the pagelist */
        for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
             xfer_len && chunk_no < arg_ary->wc_nchunks;
             chunk_no++) {
                struct rpcrdma_segment *arg_ch;
                u64 rs_offset;

                arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, arg_ch->rs_length);

                /* Prepare the response chunk given the length actually
                 * written */
                rs_offset = get_unaligned(&(arg_ch->rs_offset));
                svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
                                            arg_ch->rs_handle,
                                            rs_offset,
                                            write_len);
                chunk_off = 0;
                while (write_len) {
                        int this_write;
                        this_write = min(write_len, max_write);
                        ret = send_write(xprt, rqstp,
                                         arg_ch->rs_handle,
                                         rs_offset + chunk_off,
                                         xdr_off,
                                         this_write,
                                         vec);
                        if (ret) {
                                dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                        ret);
                                return -EIO;
                        }
                        chunk_off += this_write;
                        xdr_off += this_write;
                        xfer_len -= this_write;
                        write_len -= this_write;
                }
        }
        /* Update the req with the number of chunks actually used */
        svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

        return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}

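/* Send the reply-list data. Unlike the write list, which covers only
 * the pagelist, a reply chunk carries the entire RPC message, so the
 * transfer below starts at xdr_off 0 and spans all rq_res.len bytes.
 */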
static int send_reply_chunks(struct svcxprt_rdma *xprt,
                             struct rpcrdma_msg *rdma_argp,
                             struct rpcrdma_msg *rdma_resp,
                             struct svc_rqst *rqstp,
                             struct svc_rdma_req_map *vec)
{
        u32 xfer_len = rqstp->rq_res.len;
        int write_len;
        int max_write;
        u32 xdr_off;
        int chunk_no;
        int chunk_off;
        struct rpcrdma_segment *ch;
        struct rpcrdma_write_array *arg_ary;
        struct rpcrdma_write_array *res_ary;
        int ret;

        arg_ary = svc_rdma_get_reply_array(rdma_argp);
        if (!arg_ary)
                return 0;
        /* XXX: need to fix when reply lists occur with read-list and/or
         * write-list */
        res_ary = (struct rpcrdma_write_array *)
                &rdma_resp->rm_body.rm_chunks[2];

        max_write = xprt->sc_max_sge * PAGE_SIZE;

        /* xdr offset starts at RPC message */
        for (xdr_off = 0, chunk_no = 0;
             xfer_len && chunk_no < arg_ary->wc_nchunks;
             chunk_no++) {
                u64 rs_offset;
                ch = &arg_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, ch->rs_length);

                /* Prepare the reply chunk given the length actually
                 * written */
                rs_offset = get_unaligned(&(ch->rs_offset));
                svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
                                            ch->rs_handle, rs_offset,
                                            write_len);
                chunk_off = 0;
                while (write_len) {
                        int this_write;

                        this_write = min(write_len, max_write);
                        ret = send_write(xprt, rqstp,
                                         ch->rs_handle,
                                         rs_offset + chunk_off,
                                         xdr_off,
                                         this_write,
                                         vec);
                        if (ret) {
                                dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                        ret);
                                return -EIO;
                        }
                        chunk_off += this_write;
                        xdr_off += this_write;
                        xfer_len -= this_write;
                        write_len -= this_write;
                }
        }
        /* Update the req with the number of chunks actually used */
        svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

        return rqstp->rq_res.len;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0]; the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' field indicates how much of
 * the XDR to include in this RDMA_SEND.
 */
static int send_reply(struct svcxprt_rdma *rdma,
                      struct svc_rqst *rqstp,
                      struct page *page,
                      struct rpcrdma_msg *rdma_resp,
                      struct svc_rdma_op_ctxt *ctxt,
                      struct svc_rdma_req_map *vec,
                      int byte_count)
{
        struct ib_send_wr send_wr;
        int sge_no;
        int sge_bytes;
        int page_no;
        int ret;

        /* Post a recv buffer to handle another request. */
        ret = svc_rdma_post_recv(rdma);
        if (ret) {
                printk(KERN_INFO
                       "svcrdma: could not post a receive buffer, err=%d. "
                       "Closing transport %p.\n", ret, rdma);
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                svc_rdma_put_context(ctxt, 0);
                return -ENOTCONN;
        }

        /* Prepare the context */
        ctxt->pages[0] = page;
        ctxt->count = 1;

        /* Prepare the SGE for the RPCRDMA Header */
        ctxt->sge[0].addr =
                ib_dma_map_page(rdma->sc_cm_id->device,
                                page, 0, PAGE_SIZE, DMA_TO_DEVICE);
        ctxt->direction = DMA_TO_DEVICE;
        ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
        ctxt->sge[0].lkey = rdma->sc_phys_mr->lkey;

        /* Determine how many of our SGE are to be transmitted */
        for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
                sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
                byte_count -= sge_bytes;
                ctxt->sge[sge_no].addr =
                        ib_dma_map_single(rdma->sc_cm_id->device,
                                          vec->sge[sge_no].iov_base,
                                          sge_bytes, DMA_TO_DEVICE);
                ctxt->sge[sge_no].length = sge_bytes;
                ctxt->sge[sge_no].lkey = rdma->sc_phys_mr->lkey;
        }
        BUG_ON(byte_count != 0);

        /* Save all respages in the ctxt and remove them from the
         * respages array. They are our pages until the I/O
         * completes.
         */
        for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
                ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
                ctxt->count++;
                rqstp->rq_respages[page_no] = NULL;
                /* If there are more pages than SGE, terminate SGE list */
                if (page_no+1 >= sge_no)
                        ctxt->sge[page_no+1].length = 0;
        }
        BUG_ON(sge_no > rdma->sc_max_sge);
        memset(&send_wr, 0, sizeof send_wr);
        ctxt->wr_op = IB_WR_SEND;
        send_wr.wr_id = (unsigned long)ctxt;
        send_wr.sg_list = ctxt->sge;
        send_wr.num_sge = sge_no;
        send_wr.opcode = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        ret = svc_rdma_send(rdma, &send_wr);
        if (ret)
                svc_rdma_put_context(ctxt, 1);

        return ret;
}

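/* The svcrdma transport builds its reply header in svc_rdma_sendto()
 * itself (via svc_rdma_xdr_encode_reply_header() below), so this
 * svc_xprt callback has nothing to prepare.
 */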
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/*
 * Return the start of an xdr buffer: back up from head[0].iov_base
 * over the bytes of xdr->len that are not accounted for by head,
 * pages, and tail, i.e. over the transport header that precedes the
 * RPC message in the head buffer.
 */
static void *xdr_start(struct xdr_buf *xdr)
{
        return xdr->head[0].iov_base -
                (xdr->len -
                 xdr->page_len -
                 xdr->tail[0].iov_len -
                 xdr->head[0].iov_len);
}
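
/* Worked example (hypothetical sizes): if rq_arg.len is 1024 with a
 * 100-byte head, 900 bytes of page data, and an empty tail, then
 * 1024 - 100 - 900 - 0 = 24 bytes of RPCRDMA header precede head[0],
 * and xdr_start() returns head[0].iov_base - 24.
 */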

int svc_rdma_sendto(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct rpcrdma_msg *rdma_argp;
        struct rpcrdma_msg *rdma_resp;
        struct rpcrdma_write_array *reply_ary;
        enum rpcrdma_proc reply_type;
        int ret;
        int inline_bytes;
        struct page *res_page;
        struct svc_rdma_op_ctxt *ctxt;
        struct svc_rdma_req_map *vec;

        dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

        /* Get the RDMA request header. */
        rdma_argp = xdr_start(&rqstp->rq_arg);

        /* Build a req vec for the XDR */
        ctxt = svc_rdma_get_context(rdma);
        ctxt->direction = DMA_TO_DEVICE;
        vec = svc_rdma_get_req_map();
        xdr_to_sge(rdma, &rqstp->rq_res, vec);

        inline_bytes = rqstp->rq_res.len;

        /* Create the RDMA response header */
        res_page = svc_rdma_get_page();
        rdma_resp = page_address(res_page);
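        /* If the client provided a reply chunk, the RPC message itself
         * moves via RDMA_WRITE and the RDMA_SEND carries only the
         * RPCRDMA header (RDMA_NOMSG); otherwise everything is sent
         * inline (RDMA_MSG).
         */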
        reply_ary = svc_rdma_get_reply_array(rdma_argp);
        if (reply_ary)
                reply_type = RDMA_NOMSG;
        else
                reply_type = RDMA_MSG;
        svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
                                         rdma_resp, reply_type);

        /* Send any write-chunk data and build resp write-list */
        ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
                                rqstp, vec);
        if (ret < 0) {
                printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
                       ret);
                goto error;
        }
        inline_bytes -= ret;

        /* Send any reply-list data and update resp reply-list */
        ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
                                rqstp, vec);
        if (ret < 0) {
                printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
                       ret);
                goto error;
        }
        inline_bytes -= ret;

        ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
                         inline_bytes);
        svc_rdma_put_req_map(vec);
        dprintk("svcrdma: send_reply returns %d\n", ret);
        return ret;
 error:
        svc_rdma_put_req_map(vec);
        svc_rdma_put_context(ctxt, 0);
        put_page(res_page);
        return ret;
}