svcrdma: Use reply and chunk map for RDMA_READ processing
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT

/*
 * Replace the pages in the rq_argpages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
			       struct svc_rdma_op_ctxt *ctxt,
			       u32 byte_count)
{
	struct page *page;
	u32 bc;
	int sge_no;

	/* Swap the page in the SGE with the page in argpages */
	page = ctxt->pages[0];
	put_page(rqstp->rq_pages[0]);
	rqstp->rq_pages[0] = page;

	/* Set up the XDR head */
	rqstp->rq_arg.head[0].iov_base = page_address(page);
	rqstp->rq_arg.head[0].iov_len = min(byte_count, ctxt->sge[0].length);
	rqstp->rq_arg.len = byte_count;
	rqstp->rq_arg.buflen = byte_count;

	/* Compute bytes past head in the SGL */
	bc = byte_count - rqstp->rq_arg.head[0].iov_len;

	/* If data remains, store it in the pagelist */
	rqstp->rq_arg.page_len = bc;
	rqstp->rq_arg.page_base = 0;
	rqstp->rq_arg.pages = &rqstp->rq_pages[1];
	sge_no = 1;
	while (bc && sge_no < ctxt->count) {
		page = ctxt->pages[sge_no];
		put_page(rqstp->rq_pages[sge_no]);
		rqstp->rq_pages[sge_no] = page;
		bc -= min(bc, ctxt->sge[sge_no].length);
		rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
		sge_no++;
	}
	rqstp->rq_respages = &rqstp->rq_pages[sge_no];

	/* We should never run out of SGE because the limit is defined to
	 * support the max allowed RPC data length
	 */
	BUG_ON(bc && (sge_no == ctxt->count));
	BUG_ON((rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len)
	       != byte_count);
	BUG_ON(rqstp->rq_arg.len != byte_count);

	/* If not all pages were used from the SGL, free the remaining ones */
	bc = sge_no;
	while (sge_no < ctxt->count) {
		page = ctxt->pages[sge_no++];
		put_page(page);
	}
	ctxt->count = bc;

	/* Set up tail */
	rqstp->rq_arg.tail[0].iov_base = NULL;
	rqstp->rq_arg.tail[0].iov_len = 0;
}

/* Encode a read-chunk-list as an array of IB SGE
 *
 * Assumptions:
 * - chunk[0]->position points to pages[0] at an offset of 0
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE-sized elements.
 *
 * Output:
 * - sge array pointing into pages[] array.
 * - chunk_sge array specifying sge index and count for each
 *   chunk in the read list
 *
 */
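/*
 * Worked example (illustrative only, assuming 4K pages): a read list
 * with two chunks of 6000 and 2000 bytes maps to three SGEs:
 *
 *   sge[0] = pages[0] + 0,    4096 bytes  \ chunk 0:
 *   sge[1] = pages[1] + 0,    1904 bytes  /  ch[0] = { start 0, count 2 }
 *   sge[2] = pages[1] + 1904, 2000 bytes  -  ch[1] = { start 2, count 1 }
 *
 * Note that chunk 1 begins mid-page: consecutive chunks are packed
 * back-to-back into the XDR pagelist, so one page may back SGEs from
 * two different chunks.
 */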
static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
			   struct svc_rqst *rqstp,
			   struct svc_rdma_op_ctxt *head,
			   struct rpcrdma_msg *rmsgp,
			   struct svc_rdma_req_map *rpl_map,
			   struct svc_rdma_req_map *chl_map,
			   int ch_count,
			   int byte_count)
{
	int sge_no;
	int sge_bytes;
	int page_off;
	int page_no;
	int ch_bytes;
	int ch_no;
	struct rpcrdma_read_chunk *ch;

	sge_no = 0;
	page_no = 0;
	page_off = 0;
	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	ch_no = 0;
	ch_bytes = ch->rc_target.rs_length;
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->arg.pages = &head->pages[head->count];
	head->hdr_count = head->count; /* save count of hdr pages */
	head->arg.page_base = 0;
	head->arg.page_len = ch_bytes;
	head->arg.len = rqstp->rq_arg.len + ch_bytes;
	head->arg.buflen = rqstp->rq_arg.buflen + ch_bytes;
	head->count++;
	chl_map->ch[0].start = 0;
	while (byte_count) {
		rpl_map->sge[sge_no].iov_base =
			page_address(rqstp->rq_arg.pages[page_no]) + page_off;
		sge_bytes = min_t(int, PAGE_SIZE-page_off, ch_bytes);
		rpl_map->sge[sge_no].iov_len = sge_bytes;
		/*
		 * Don't bump head->count here because the same page
		 * may be used by multiple SGE.
		 */
		head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];
		rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];

		byte_count -= sge_bytes;
		ch_bytes -= sge_bytes;
		sge_no++;
		/*
		 * If all bytes for this chunk have been mapped to an
		 * SGE, move to the next SGE
		 */
		if (ch_bytes == 0) {
			chl_map->ch[ch_no].count =
				sge_no - chl_map->ch[ch_no].start;
			ch_no++;
			ch++;
			chl_map->ch[ch_no].start = sge_no;
			ch_bytes = ch->rc_target.rs_length;
			/* If bytes remain, account for the next chunk */
			if (byte_count) {
				head->arg.page_len += ch_bytes;
				head->arg.len += ch_bytes;
				head->arg.buflen += ch_bytes;
			}
		}
		/*
		 * If this SGE consumed all of the page, move to the
		 * next page
		 */
		if ((sge_bytes + page_off) == PAGE_SIZE) {
			page_no++;
			page_off = 0;
			/*
			 * If there are still bytes left to map, bump
			 * the page count
			 */
			if (byte_count)
				head->count++;
		} else {
			page_off += sge_bytes;
		}
	}
	BUG_ON(byte_count != 0);
	return sge_no;
}

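/*
 * Build the per-WR SGE list from the reply map. Each kvec is DMA-mapped
 * for the device, and *sgl_offset is advanced by the bytes mapped so
 * that a follow-on RDMA_READ for the same chunk resumes at the correct
 * offset in the remote memory region.
 */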
static void rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
			      struct svc_rdma_op_ctxt *ctxt,
			      struct kvec *vec,
			      u64 *sgl_offset,
			      int count)
{
	int i;

	ctxt->count = count;
	ctxt->direction = DMA_FROM_DEVICE;
	for (i = 0; i < count; i++) {
		ctxt->sge[i].addr =
			ib_dma_map_single(xprt->sc_cm_id->device,
					  vec[i].iov_base, vec[i].iov_len,
					  DMA_FROM_DEVICE);
		ctxt->sge[i].length = vec[i].iov_len;
		ctxt->sge[i].lkey = xprt->sc_phys_mr->lkey;
		*sgl_offset = *sgl_offset + vec[i].iov_len;
	}
}

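/*
 * The iWARP protocol describes the local read sink with a single STag
 * and offset, so an iWARP device cannot scatter one RDMA_READ across
 * multiple SGEs; limit such devices to one SGE per READ WR and let the
 * caller post multiple READs per chunk. IB devices may scatter a READ
 * into up to sc_max_sge entries.
 */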
static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
{
	if ((rdma_node_get_transport(xprt->sc_cm_id->device->node_type) ==
	     RDMA_TRANSPORT_IWARP) && sge_count > 1)
		return 1;
	else
		return min_t(int, sge_count, xprt->sc_max_sge);
}

/*
 * Use RDMA_READ to read data from the advertised client buffer into the
 * XDR stream starting at rq_arg.head[0].iov_base. Each chunk in the
 * read list contains the following fields:
 * discrim      - '1'; this field is not used for data placement
 * position     - the XDR stream offset (the same for every chunk)
 * handle       - RMR for the client memory region
 * length       - data transfer length
 * offset       - 64-bit tagged offset in the remote memory region
 *
 * On our side, we need to read into a pagelist. The first page immediately
 * follows the RPC header.
 *
 * This function returns:
 * 0 - No error and no read-list found.
 *
 * 1 - Successful read-list processing. The data is not yet in
 * the pagelist and therefore the RPC request must be deferred. The
 * I/O completion will enqueue the transport again and
 * svc_rdma_recvfrom will complete the request.
 *
 * <0 - Error processing/posting read-list.
 *
 * NOTE: The ctxt must not be touched after the last WR has been posted
 * because the I/O completion processing may occur on another
 * processor and free / modify the context. Do not touch!
 */
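/*
 * A hypothetical two-chunk read list as it appears after XDR decode
 * into struct rpcrdma_read_chunk (values are illustrative only):
 *
 *   { rc_discrim: 1, rc_position: 0,
 *     rc_target: { rs_handle: <rkey>, rs_length: 6000, rs_offset: <addr0> } }
 *   { rc_discrim: 1, rc_position: 0,
 *     rc_target: { rs_handle: <rkey>, rs_length: 2000, rs_offset: <addr1> } }
 *   { rc_discrim: 0 }   <- a zero discriminator terminates the list
 *
 * The loop below posts one or more RDMA_READ WRs per chunk, using
 * chl_map to find each chunk's slice of the SGE array in rpl_map.
 */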
static int rdma_read_xdr(struct svcxprt_rdma *xprt,
			 struct rpcrdma_msg *rmsgp,
			 struct svc_rqst *rqstp,
			 struct svc_rdma_op_ctxt *hdr_ctxt)
{
	struct ib_send_wr read_wr;
	int err = 0;
	int ch_no;
	int ch_count;
	int byte_count;
	int sge_count;
	u64 sgl_offset;
	struct rpcrdma_read_chunk *ch;
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct svc_rdma_req_map *rpl_map;
	struct svc_rdma_req_map *chl_map;

	/* If no read list is present, return 0 */
	ch = svc_rdma_get_read_chunk(rmsgp);
	if (!ch)
		return 0;

	/* Reject over-long read lists before allocating the maps so the
	 * error path does not leak them.
	 */
	svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
	if (ch_count > RPCSVC_MAXPAGES)
		return -EINVAL;

	/* Allocate temporary reply and chunk maps */
	rpl_map = svc_rdma_get_req_map();
	chl_map = svc_rdma_get_req_map();

	sge_count = rdma_rcl_to_sge(xprt, rqstp, hdr_ctxt, rmsgp,
				    rpl_map, chl_map,
				    ch_count, byte_count);
	sgl_offset = 0;
	ch_no = 0;

	for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	     ch->rc_discrim != 0; ch++, ch_no++) {
next_sge:
		ctxt = svc_rdma_get_context(xprt);
		ctxt->direction = DMA_FROM_DEVICE;
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

		/* Prepare READ WR */
		memset(&read_wr, 0, sizeof read_wr);
		ctxt->wr_op = IB_WR_RDMA_READ;
		read_wr.wr_id = (unsigned long)ctxt;
		read_wr.opcode = IB_WR_RDMA_READ;
		read_wr.send_flags = IB_SEND_SIGNALED;
		read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
		read_wr.wr.rdma.remote_addr =
			get_unaligned(&(ch->rc_target.rs_offset)) +
			sgl_offset;
		read_wr.sg_list = ctxt->sge;
		read_wr.num_sge =
			rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
		rdma_set_ctxt_sge(xprt, ctxt,
				  &rpl_map->sge[chl_map->ch[ch_no].start],
				  &sgl_offset,
				  read_wr.num_sge);
		if (((ch+1)->rc_discrim == 0) &&
		    (read_wr.num_sge == chl_map->ch[ch_no].count)) {
			/*
			 * Mark the last RDMA_READ with a bit to
			 * indicate all RPC data has been fetched from
			 * the client and the RPC needs to be enqueued.
			 */
			set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
			ctxt->read_hdr = hdr_ctxt;
		}
		/* Post the read */
		err = svc_rdma_send(xprt, &read_wr);
		if (err) {
			printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n",
			       err);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 0);
			goto out;
		}
		atomic_inc(&rdma_stat_read);

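		/*
		 * If the device could not cover the whole chunk in one
		 * WR (e.g. a single-SGE iWARP READ), slide the chunk's
		 * SGE window forward and post another READ for the
		 * remainder; sgl_offset was already advanced by
		 * rdma_set_ctxt_sge.
		 */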
		if (read_wr.num_sge < chl_map->ch[ch_no].count) {
			chl_map->ch[ch_no].count -= read_wr.num_sge;
			chl_map->ch[ch_no].start += read_wr.num_sge;
			goto next_sge;
		}
		sgl_offset = 0;
		err = 1;
	}

 out:
	svc_rdma_put_req_map(rpl_map);
	svc_rdma_put_req_map(chl_map);

	/* Detach arg pages. svc_recv will replenish them */
	for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++)
		rqstp->rq_pages[ch_no] = NULL;

	/*
	 * Detach res pages. svc_release must see an rq_resused count of
	 * zero or it will attempt to put them.
	 */
	while (rqstp->rq_resused)
		rqstp->rq_respages[--rqstp->rq_resused] = NULL;

	return err;
}

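/*
 * Called from svc_rdma_recvfrom once every RDMA_READ posted for a
 * deferred request has completed: splice the pages saved in the head
 * context back into the rqstp and rebuild rq_arg so the RPC can be
 * processed as if it had arrived inline.
 */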
static int rdma_read_complete(struct svc_rqst *rqstp,
			      struct svc_rdma_op_ctxt *head)
{
	int page_no;
	int ret;

	BUG_ON(!head);

	/* Copy RPC pages */
	for (page_no = 0; page_no < head->count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->pages[page_no];
	}
	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
	rqstp->rq_arg.page_len = head->arg.page_len;
	rqstp->rq_arg.page_base = head->arg.page_base;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
	rqstp->rq_resused = 0;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->arg.head[0];
	rqstp->rq_arg.tail[0] = head->arg.tail[0];
	rqstp->rq_arg.len = head->arg.len;
	rqstp->rq_arg.buflen = head->arg.buflen;

	/* Free the context */
	svc_rdma_put_context(head, 0);

	/* XXX: What should this be? */
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%d, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
		ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);

	svc_xprt_received(rqstp->rq_xprt);
	return ret;
}

/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ
 * request.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct rpcrdma_msg *rmsgp;
	int ret = 0;
	int len;

	dprintk("svcrdma: rqstp=%p\n", rqstp);

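	/*
	 * A previously deferred request whose RDMA_READs have all
	 * completed sits on sc_read_complete_q; it takes priority over
	 * freshly received messages on sc_rq_dto_q.
	 */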
	spin_lock_bh(&rdma_xprt->sc_read_complete_lock);
	if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
		ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	}
	spin_unlock_bh(&rdma_xprt->sc_read_complete_lock);
	if (ctxt)
		return rdma_read_complete(rqstp, ctxt);

	spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
		ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	} else {
		atomic_inc(&rdma_stat_rq_starve);
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		ctxt = NULL;
	}
	spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!ctxt) {
		/* This is the EAGAIN path. The svc_recv routine will
		 * return -EAGAIN, the nfsd thread will call into
		 * svc_recv again, and we shouldn't be on the active
		 * transport list.
		 */
		if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
			goto close_out;

		BUG_ON(ret);
		goto out;
	}
	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
		ctxt, rdma_xprt, rqstp, ctxt->wc_status);
	BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
	atomic_inc(&rdma_stat_recv);

	/* Build up the XDR from the receive buffers. */
	rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

	/* Decode the RDMA header. */
	len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
	rqstp->rq_xprt_hlen = len;

	/* If the request is invalid, reply with an error */
	if (len < 0) {
		if (len == -ENOSYS)
			svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
		goto close_out;
	}

	/* Read read-list data. */
	ret = rdma_read_xdr(rdma_xprt, rmsgp, rqstp, ctxt);
	if (ret > 0) {
		/* read-list posted, defer until data received from client. */
		svc_xprt_received(xprt);
		return 0;
	}
	if (ret < 0) {
		/* Post of read-list failed, free context. */
		svc_rdma_put_context(ctxt, 1);
		return 0;
	}

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	svc_rdma_put_context(ctxt, 0);
 out:
	dprintk("svcrdma: ret=%d, rq_arg.len=%d, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
		ret, rqstp->rq_arg.len,
		rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	svc_xprt_received(xprt);
	return ret;

 close_out:
	if (ctxt)
		svc_rdma_put_context(ctxt, 1);
	dprintk("svcrdma: transport %p is closing\n", xprt);
	/*
	 * Set the close bit and enqueue it. svc_recv will see the
	 * close bit and call svc_xprt_delete
	 */
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	svc_xprt_received(xprt);
	return 0;
}