ceph: unregister canceled/timed out osd requests
[safe/jmp/linux-2.6] / fs / ceph / osd_client.c
1 #include "ceph_debug.h"
2
3 #include <linux/err.h>
4 #include <linux/highmem.h>
5 #include <linux/mm.h>
6 #include <linux/pagemap.h>
7 #include <linux/slab.h>
8 #include <linux/uaccess.h>
9
10 #include "super.h"
11 #include "osd_client.h"
12 #include "messenger.h"
13 #include "decode.h"
14 #include "auth.h"
15
16 const static struct ceph_connection_operations osd_con_ops;
17
18 static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);
19
20 /*
21  * Implement client access to distributed object storage cluster.
22  *
23  * All data objects are stored within a cluster/cloud of OSDs, or
24  * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
25  * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
26  * remote daemons serving up and coordinating consistent and safe
27  * access to storage.
28  *
29  * Cluster membership and the mapping of data objects onto storage devices
30  * are described by the osd map.
31  *
32  * We keep track of pending OSD requests (read, write), resubmit
33  * requests to different OSDs when the cluster topology/data layout
34  * change, or retry the affected requests when the communications
35  * channel with an OSD is reset.
36  */
37
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static void calc_layout(struct ceph_osd_client *osdc,
                        struct ceph_vino vino, struct ceph_file_layout *layout,
                        u64 off, u64 *plen,
                        struct ceph_osd_request *req)
{
        struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
        struct ceph_osd_op *op = (void *)(reqhead + 1);  /* ops follow the head */
        u64 orig_len = *plen;
        u64 objoff, objlen;    /* extent in object */
        u64 bno;               /* object (block) number within the file */

        reqhead->snapid = cpu_to_le64(vino.snap);

        /* object extent?  may shorten *plen to stop at an object boundary */
        ceph_calc_file_object_mapping(layout, off, plen, &bno,
                                      &objoff, &objlen);
        if (*plen < orig_len)
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);

        /* object name is "<ino hex>.<block number hex>" */
        sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
        req->r_oid_len = strlen(req->r_oid);

        op->extent.offset = cpu_to_le64(objoff);
        op->extent.length = cpu_to_le64(objlen);
        /* NOTE(review): page count uses the file offset, not objoff —
         * presumably the page vector mirrors the file extent; confirm. */
        req->r_num_pages = calc_pages_for(off, *plen);

        dout("calc_layout %s (%d) %llu~%llu (%d pages)\n",
             req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages);
}
75
76
/*
 * requests
 */

/*
 * kref release callback: tear down a request once its last reference
 * is dropped.  Releases the request/reply messages, any page vector we
 * own, the snap context reference, and finally the request memory
 * itself (back to the mempool it came from, if any).
 */
void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                                    struct ceph_osd_request,
                                                    r_kref);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);
        if (req->r_own_pages)
                ceph_release_page_vector(req->r_pages,
                                         req->r_num_pages);
        ceph_put_snap_context(req->r_snapc);
        /* r_mempool records which allocator the request came from */
        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else
                kfree(req);
}
99
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 *
 * Returns the new request (caller owns a reference) or an ERR_PTR on
 * allocation failure.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               int do_sync,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               struct timespec *mtime,
                                               bool use_mempool, int num_reply)
{
        struct ceph_osd_request *req;
        struct ceph_msg *msg;
        struct ceph_osd_request_head *head;
        struct ceph_osd_op *op;
        void *p;
        int do_trunc = truncate_seq && (off + *plen > truncate_size);
        int num_op = 1 + do_sync + do_trunc;  /* main op + optional extras */
        size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
        int err, i;
        u64 prevofs;

        /* allocate the request itself (zeroed either way) */
        if (use_mempool) {
                req = mempool_alloc(osdc->req_mempool, GFP_NOFS);
                memset(req, 0, sizeof(*req));
        } else {
                req = kzalloc(sizeof(*req), GFP_NOFS);
        }
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        /* reserve reply messages up front */
        err = ceph_msgpool_resv(&osdc->msgpool_op_reply, num_reply);
        if (err) {
                ceph_osdc_put_request(req);
                return ERR_PTR(-ENOMEM);
        }
        req->r_num_prealloc_reply = num_reply;

        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        init_completion(&req->r_safe_completion);
        INIT_LIST_HEAD(&req->r_unsafe_item);
        req->r_flags = flags;

        WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);

        /* create message; allow space for oid */
        msg_size += 40;
        if (snapc)
                msg_size += sizeof(u64) * snapc->num_snaps;
        if (use_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL);
        if (IS_ERR(msg)) {
                /* undo the reply reservation before bailing */
                ceph_msgpool_resv(&osdc->msgpool_op_reply, -num_reply);
                ceph_osdc_put_request(req);
                return ERR_PTR(PTR_ERR(msg));
        }
        msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
        memset(msg->front.iov_base, 0, msg->front.iov_len);
        head = msg->front.iov_base;
        op = (void *)(head + 1);     /* op array follows the head... */
        p = (void *)(op + num_op);   /* ...then the oid + snaps payload */

        req->r_request = msg;
        req->r_snapc = ceph_get_snap_context(snapc);

        head->client_inc = cpu_to_le32(1); /* always, for now. */
        head->flags = cpu_to_le32(flags);
        if (flags & CEPH_OSD_FLAG_WRITE)
                ceph_encode_timespec(&head->mtime, mtime);
        head->num_ops = cpu_to_le16(num_op);
        op->op = cpu_to_le16(opcode);

        /* calculate max write size */
        calc_layout(osdc, vino, layout, off, plen, req);
        req->r_file_layout = *layout;  /* keep a copy */

        if (flags & CEPH_OSD_FLAG_WRITE) {
                req->r_request->hdr.data_off = cpu_to_le16(off);
                req->r_request->hdr.data_len = cpu_to_le32(*plen);
                op->payload_len = cpu_to_le32(*plen);
        }

        /* fill in oid */
        head->object_len = cpu_to_le32(req->r_oid_len);
        memcpy(p, req->r_oid, req->r_oid_len);
        p += req->r_oid_len;

        /* additional ops */
        if (do_trunc) {
                op++;
                /* reads mask the truncated range; writes record the new size */
                op->op = cpu_to_le16(opcode == CEPH_OSD_OP_READ ?
                             CEPH_OSD_OP_MASKTRUNC : CEPH_OSD_OP_SETTRUNC);
                op->trunc.truncate_seq = cpu_to_le32(truncate_seq);
                prevofs = le64_to_cpu((op-1)->extent.offset);
                op->trunc.truncate_size = cpu_to_le64(truncate_size -
                                                      (off-prevofs));
        }
        if (do_sync) {
                op++;
                op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC);
        }
        if (snapc) {
                head->snap_seq = cpu_to_le64(snapc->seq);
                head->num_snaps = cpu_to_le32(snapc->num_snaps);
                for (i = 0; i < snapc->num_snaps; i++) {
                        put_unaligned_le64(snapc->snaps[i], p);
                        p += sizeof(u64);
                }
        }

        /* everything we encoded must fit in the front of the message */
        BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
        return req;
}
230
231 /*
232  * We keep osd requests in an rbtree, sorted by ->r_tid.
233  */
234 static void __insert_request(struct ceph_osd_client *osdc,
235                              struct ceph_osd_request *new)
236 {
237         struct rb_node **p = &osdc->requests.rb_node;
238         struct rb_node *parent = NULL;
239         struct ceph_osd_request *req = NULL;
240
241         while (*p) {
242                 parent = *p;
243                 req = rb_entry(parent, struct ceph_osd_request, r_node);
244                 if (new->r_tid < req->r_tid)
245                         p = &(*p)->rb_left;
246                 else if (new->r_tid > req->r_tid)
247                         p = &(*p)->rb_right;
248                 else
249                         BUG();
250         }
251
252         rb_link_node(&new->r_node, parent, p);
253         rb_insert_color(&new->r_node, &osdc->requests);
254 }
255
256 static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
257                                                  u64 tid)
258 {
259         struct ceph_osd_request *req;
260         struct rb_node *n = osdc->requests.rb_node;
261
262         while (n) {
263                 req = rb_entry(n, struct ceph_osd_request, r_node);
264                 if (tid < req->r_tid)
265                         n = n->rb_left;
266                 else if (tid > req->r_tid)
267                         n = n->rb_right;
268                 else
269                         return req;
270         }
271         return NULL;
272 }
273
274 static struct ceph_osd_request *
275 __lookup_request_ge(struct ceph_osd_client *osdc,
276                     u64 tid)
277 {
278         struct ceph_osd_request *req;
279         struct rb_node *n = osdc->requests.rb_node;
280
281         while (n) {
282                 req = rb_entry(n, struct ceph_osd_request, r_node);
283                 if (tid < req->r_tid) {
284                         if (!n->rb_left)
285                                 return req;
286                         n = n->rb_left;
287                 } else if (tid > req->r_tid) {
288                         n = n->rb_right;
289                 } else {
290                         return req;
291                 }
292         }
293         return NULL;
294 }
295
296
/*
 * If the osd connection drops, we need to resubmit all requests.
 * Connection-layer fault callback.
 */
static void osd_reset(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc;

        if (!osd)
                return;
        dout("osd_reset osd%d\n", osd->o_osd);
        osdc = osd->o_osdc;
        /* bump incarnation so requests sent on the old connection get
         * remapped/resent (see the r_sent check in __map_osds) */
        osd->o_incarnation++;
        down_read(&osdc->map_sem);  /* kick_requests needs map_sem held */
        kick_requests(osdc, osd);
        up_read(&osdc->map_sem);
}
314
315 /*
316  * Track open sessions with osds.
317  */
318 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
319 {
320         struct ceph_osd *osd;
321
322         osd = kzalloc(sizeof(*osd), GFP_NOFS);
323         if (!osd)
324                 return NULL;
325
326         atomic_set(&osd->o_ref, 1);
327         osd->o_osdc = osdc;
328         INIT_LIST_HEAD(&osd->o_requests);
329         osd->o_incarnation = 1;
330
331         ceph_con_init(osdc->client->msgr, &osd->o_con);
332         osd->o_con.private = osd;
333         osd->o_con.ops = &osd_con_ops;
334         osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;
335
336         return osd;
337 }
338
339 static struct ceph_osd *get_osd(struct ceph_osd *osd)
340 {
341         if (atomic_inc_not_zero(&osd->o_ref)) {
342                 dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
343                      atomic_read(&osd->o_ref));
344                 return osd;
345         } else {
346                 dout("get_osd %p FAIL\n", osd);
347                 return NULL;
348         }
349 }
350
/*
 * Drop a reference on an osd session; free it when the count reaches
 * zero.
 */
static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
        if (atomic_dec_and_test(&osd->o_ref))
                kfree(osd);
}
358
/*
 * remove an osd from our map (the osds rbtree), close its connection,
 * and drop the tree's reference (which may free it).
 */
static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
        dout("remove_osd %p\n", osd);
        BUG_ON(!list_empty(&osd->o_requests));  /* callers detach requests first */
        rb_erase(&osd->o_node, &osdc->osds);
        ceph_con_close(&osd->o_con);
        put_osd(osd);  /* may free osd; do not touch it afterwards */
}
370
371 /*
372  * reset osd connect
373  */
374 static int reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
375 {
376         int ret = 0;
377
378         dout("reset_osd %p osd%d\n", osd, osd->o_osd);
379         if (list_empty(&osd->o_requests)) {
380                 remove_osd(osdc, osd);
381         } else {
382                 ceph_con_close(&osd->o_con);
383                 ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
384                 osd->o_incarnation++;
385         }
386         return ret;
387 }
388
389 static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
390 {
391         struct rb_node **p = &osdc->osds.rb_node;
392         struct rb_node *parent = NULL;
393         struct ceph_osd *osd = NULL;
394
395         while (*p) {
396                 parent = *p;
397                 osd = rb_entry(parent, struct ceph_osd, o_node);
398                 if (new->o_osd < osd->o_osd)
399                         p = &(*p)->rb_left;
400                 else if (new->o_osd > osd->o_osd)
401                         p = &(*p)->rb_right;
402                 else
403                         BUG();
404         }
405
406         rb_link_node(&new->o_node, parent, p);
407         rb_insert_color(&new->o_node, &osdc->osds);
408 }
409
410 static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
411 {
412         struct ceph_osd *osd;
413         struct rb_node *n = osdc->osds.rb_node;
414
415         while (n) {
416                 osd = rb_entry(n, struct ceph_osd, o_node);
417                 if (o < osd->o_osd)
418                         n = n->rb_left;
419                 else if (o > osd->o_osd)
420                         n = n->rb_right;
421                 else
422                         return osd;
423         }
424         return NULL;
425 }
426
427
/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void register_request(struct ceph_osd_client *osdc,
                             struct ceph_osd_request *req)
{
        struct ceph_osd_request_head *head = req->r_request->front.iov_base;

        mutex_lock(&osdc->request_mutex);
        req->r_tid = ++osdc->last_tid;
        head->tid = cpu_to_le64(req->r_tid);  /* tid also goes on the wire */

        dout("register_request %p tid %lld\n", req, req->r_tid);
        __insert_request(osdc, req);
        ceph_osdc_get_request(req);  /* reference held by the request tree */
        osdc->num_requests++;

        req->r_timeout_stamp =
                jiffies + osdc->client->mount_args->osd_timeout*HZ;

        /* first pending request: arm the timeout work */
        if (osdc->num_requests == 1) {
                osdc->timeout_tid = req->r_tid;
                dout("  timeout on tid %llu at %lu\n", req->r_tid,
                     req->r_timeout_stamp);
                schedule_delayed_work(&osdc->timeout_work,
                      round_jiffies_relative(req->r_timeout_stamp - jiffies));
        }
        mutex_unlock(&osdc->request_mutex);
}
458
459 /*
460  * called under osdc->request_mutex
461  */
462 static void __unregister_request(struct ceph_osd_client *osdc,
463                                  struct ceph_osd_request *req)
464 {
465         dout("__unregister_request %p tid %lld\n", req, req->r_tid);
466         rb_erase(&req->r_node, &osdc->requests);
467         osdc->num_requests--;
468
469         ceph_msgpool_resv(&osdc->msgpool_op_reply, -req->r_num_prealloc_reply);
470
471         if (req->r_osd) {
472                 /* make sure the original request isn't in flight. */
473                 ceph_con_revoke(&req->r_osd->o_con, req->r_request);
474
475                 list_del_init(&req->r_osd_item);
476                 if (list_empty(&req->r_osd->o_requests))
477                         remove_osd(osdc, req->r_osd);
478                 req->r_osd = NULL;
479         }
480
481         ceph_osdc_put_request(req);
482
483         if (req->r_tid == osdc->timeout_tid) {
484                 if (osdc->num_requests == 0) {
485                         dout("no requests, canceling timeout\n");
486                         osdc->timeout_tid = 0;
487                         cancel_delayed_work(&osdc->timeout_work);
488                 } else {
489                         req = rb_entry(rb_first(&osdc->requests),
490                                        struct ceph_osd_request, r_node);
491                         osdc->timeout_tid = req->r_tid;
492                         dout("rescheduled timeout on tid %llu at %lu\n",
493                              req->r_tid, req->r_timeout_stamp);
494                         schedule_delayed_work(&osdc->timeout_work,
495                               round_jiffies_relative(req->r_timeout_stamp -
496                                                      jiffies));
497                 }
498         }
499 }
500
501 /*
502  * Cancel a previously queued request message
503  */
504 static void __cancel_request(struct ceph_osd_request *req)
505 {
506         if (req->r_sent) {
507                 ceph_con_revoke(&req->r_osd->o_con, req->r_request);
508                 req->r_sent = 0;
509         }
510 }
511
/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_osds(struct ceph_osd_client *osdc,
                      struct ceph_osd_request *req)
{
        struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
        struct ceph_pg pgid;
        int o = -1;
        int err;
        struct ceph_osd *newosd = NULL;

        dout("map_osds %p tid %lld\n", req, req->r_tid);
        /* map the object onto a pg, then the pg onto its primary osd */
        err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
                                      &req->r_file_layout, osdc->osdmap);
        if (err)
                return err;
        pgid = reqhead->layout.ol_pgid;
        o = ceph_calc_pg_primary(osdc->osdmap, pgid);

        /*
         * no change if we are still on the same osd and have sent to
         * its current incarnation — or if we mapped nowhere before and
         * still map nowhere.
         */
        if ((req->r_osd && req->r_osd->o_osd == o &&
             req->r_sent >= req->r_osd->o_incarnation) ||
            (req->r_osd == NULL && o == -1))
                return 0;  /* no change */

        dout("map_osds tid %llu pgid %d.%x osd%d (was osd%d)\n",
             req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
             req->r_osd ? req->r_osd->o_osd : -1);

        /* detach from the old osd, keeping its struct for possible reuse */
        if (req->r_osd) {
                __cancel_request(req);
                list_del_init(&req->r_osd_item);
                if (list_empty(&req->r_osd->o_requests)) {
                        /* try to re-use r_osd if possible */
                        newosd = get_osd(req->r_osd);
                        remove_osd(osdc, newosd);
                }
                req->r_osd = NULL;
        }

        req->r_osd = __lookup_osd(osdc, o);
        if (!req->r_osd && o >= 0) {
                if (newosd) {
                        req->r_osd = newosd;  /* recycle the detached struct */
                        newosd = NULL;
                } else {
                        err = -ENOMEM;
                        req->r_osd = create_osd(osdc);
                        if (!req->r_osd)
                                goto out;
                }

                dout("map_osds osd %p is osd%d\n", req->r_osd, o);
                req->r_osd->o_osd = o;
                req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
                __insert_osd(osdc, req->r_osd);

                ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
        }

        if (req->r_osd)
                list_add(&req->r_osd_item, &req->r_osd->o_requests);
        err = 1;   /* osd changed */

out:
        if (newosd)
                put_osd(newosd);  /* drop the reuse ref taken above */
        return err;
}
587
/*
 * (Re)send a request to its mapped osd, remapping it first if needed.
 * Returns 0 on success or if there is currently no usable osd;
 * negative on mapping error.
 *
 * caller should hold map_sem (for read) and request_mutex
 */
static int __send_request(struct ceph_osd_client *osdc,
                          struct ceph_osd_request *req)
{
        struct ceph_osd_request_head *reqhead;
        int err;

        err = __map_osds(osdc, req);
        if (err < 0)
                return err;
        if (req->r_osd == NULL) {
                /* the pg has no up osd; ask for a newer map and wait */
                dout("send_request %p no up osds in pg\n", req);
                ceph_monc_request_next_osdmap(&osdc->client->monc);
                return 0;
        }

        dout("send_request %p tid %llu to osd%d flags %d\n",
             req, req->r_tid, req->r_osd->o_osd, req->r_flags);

        /* stamp this (re)send with the current map epoch and flags */
        reqhead = req->r_request->front.iov_base;
        reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
        reqhead->flags |= cpu_to_le32(req->r_flags);  /* e.g., RETRY */
        reqhead->reassert_version = req->r_reassert_version;

        req->r_timeout_stamp = jiffies+osdc->client->mount_args->osd_timeout*HZ;

        ceph_msg_get(req->r_request); /* send consumes a ref */
        ceph_con_send(&req->r_osd->o_con, req->r_request);
        /* record the incarnation we sent on, for the remap check */
        req->r_sent = req->r_osd->o_incarnation;
        return 0;
}
621
/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests has been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
        struct ceph_osd_client *osdc =
                container_of(work, struct ceph_osd_client, timeout_work.work);
        struct ceph_osd_request *req;
        struct ceph_osd *osd;
        unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ;
        unsigned long next_timeout = timeout + jiffies;
        struct rb_node *p;

        dout("timeout\n");
        down_read(&osdc->map_sem);

        /* a hung osd may mean we missed a map update; ask for the next */
        ceph_monc_request_next_osdmap(&osdc->client->monc);

        mutex_lock(&osdc->request_mutex);
        /* pass 1: retry any request whose previous send attempt failed */
        for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
                req = rb_entry(p, struct ceph_osd_request, r_node);

                if (req->r_resend) {
                        int err;

                        dout("osdc resending prev failed %lld\n", req->r_tid);
                        err = __send_request(osdc, req);
                        if (err)
                                dout("osdc failed again on %lld\n", req->r_tid);
                        else
                                req->r_resend = false;
                        continue;
                }
        }
        /*
         * pass 2: for each osd whose oldest in-flight request has timed
         * out, send a keepalive so a dead connection gets noticed.
         */
        for (p = rb_first(&osdc->osds); p; p = rb_next(p)) {
                osd = rb_entry(p, struct ceph_osd, o_node);
                if (list_empty(&osd->o_requests))
                        continue;
                req = list_first_entry(&osd->o_requests,
                                       struct ceph_osd_request, r_osd_item);
                if (time_before(jiffies, req->r_timeout_stamp))
                        continue;

                dout(" tid %llu (at least) timed out on osd%d\n",
                     req->r_tid, osd->o_osd);
                req->r_timeout_stamp = next_timeout;
                ceph_con_keepalive(&osd->o_con);
        }

        /* rearm while some request still owns the timeout */
        if (osdc->timeout_tid)
                schedule_delayed_work(&osdc->timeout_work,
                                      round_jiffies_relative(timeout));

        mutex_unlock(&osdc->request_mutex);

        up_read(&osdc->map_sem);
}
685
/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
        struct ceph_osd_reply_head *rhead = msg->front.iov_base;
        struct ceph_osd_request *req;
        u64 tid;
        int numops, object_len, flags;

        /* sanity-check the reply length against its claimed contents */
        if (msg->front.iov_len < sizeof(*rhead))
                goto bad;
        tid = le64_to_cpu(rhead->tid);
        numops = le32_to_cpu(rhead->num_ops);
        object_len = le32_to_cpu(rhead->object_len);
        if (msg->front.iov_len != sizeof(*rhead) + object_len +
            numops * sizeof(struct ceph_osd_op))
                goto bad;
        dout("handle_reply %p tid %llu\n", msg, tid);

        /* lookup */
        mutex_lock(&osdc->request_mutex);
        req = __lookup_request(osdc, tid);
        if (req == NULL) {
                /* unknown tid: already unregistered (canceled/completed) */
                dout("handle_reply tid %llu dne\n", tid);
                mutex_unlock(&osdc->request_mutex);
                return;
        }
        ceph_osdc_get_request(req);  /* hold req across the callbacks below */
        flags = le32_to_cpu(rhead->flags);

        if (req->r_reply) {
                /*
                 * once we see the message has been received, we don't
                 * need a ref (which is only needed for revoking
                 * pages)
                 */
                ceph_msg_put(req->r_reply);
                req->r_reply = NULL;
        }

        if (!req->r_got_reply) {
                /* first reply for this request: record the result */
                unsigned bytes;

                req->r_result = le32_to_cpu(rhead->result);
                bytes = le32_to_cpu(msg->hdr.data_len);
                dout("handle_reply result %d bytes %d\n", req->r_result,
                     bytes);
                if (req->r_result == 0)
                        req->r_result = bytes;  /* success: report length */

                /* in case this is a write and we need to replay, */
                req->r_reassert_version = rhead->reassert_version;

                req->r_got_reply = 1;
        } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
                /* a second non-ondisk reply adds no information */
                dout("handle_reply tid %llu dup ack\n", tid);
                mutex_unlock(&osdc->request_mutex);
                goto done;
        }

        dout("handle_reply tid %llu flags %d\n", tid, flags);

        /* either this is a read, or we got the safe response */
        if ((flags & CEPH_OSD_FLAG_ONDISK) ||
            ((flags & CEPH_OSD_FLAG_WRITE) == 0))
                __unregister_request(osdc, req);

        mutex_unlock(&osdc->request_mutex);

        /* notify the waiter/caller outside the mutex */
        if (req->r_callback)
                req->r_callback(req, msg);
        else
                complete(&req->r_completion);

        if (flags & CEPH_OSD_FLAG_ONDISK) {
                if (req->r_safe_callback)
                        req->r_safe_callback(req, msg);
                complete(&req->r_safe_completion);  /* fsync waiter */
        }

done:
        ceph_osdc_put_request(req);
        return;

bad:
        pr_err("corrupt osd_op_reply got %d %d expected %d\n",
               (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
               (int)sizeof(*rhead));
        ceph_msg_dump(msg);
}
778
779
/*
 * Resubmit osd requests whose osd or osd address has changed.  Request
 * a new osd map if osds are down, or we are otherwise unable to determine
 * how to direct a request.
 *
 * Close connections to down osds.
 *
 * If @kickosd is specified, resubmit requests for that specific osd.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static void kick_requests(struct ceph_osd_client *osdc,
                          struct ceph_osd *kickosd)
{
        struct ceph_osd_request *req;
        struct rb_node *p, *n;
        int needmap = 0;
        int err;

        dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1);
        mutex_lock(&osdc->request_mutex);
        if (!kickosd) {
                /* full kick: reset any osd that is down or has moved */
                for (p = rb_first(&osdc->osds); p; p = n) {
                        struct ceph_osd *osd =
                                rb_entry(p, struct ceph_osd, o_node);

                        n = rb_next(p);  /* reset_osd may erase p */
                        if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
                            !ceph_entity_addr_equal(&osd->o_con.peer_addr,
                                            ceph_osd_addr(osdc->osdmap,
                                                          osd->o_osd)))
                                reset_osd(osdc, osd);
                }
        }

        for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
                req = rb_entry(p, struct ceph_osd_request, r_node);

                if (req->r_resend) {
                        dout(" r_resend set on tid %llu\n", req->r_tid);
                        __cancel_request(req);
                        goto kick;
                }
                if (req->r_osd && kickosd == req->r_osd) {
                        __cancel_request(req);
                        goto kick;
                }

                err = __map_osds(osdc, req);
                if (err == 0)
                        continue;  /* no change */
                if (err < 0) {
                        /*
                         * FIXME: really, we should set the request
                         * error and fail if this isn't a 'nofail'
                         * request, but that's a fair bit more
                         * complicated to do.  So retry!
                         */
                        dout(" setting r_resend on %llu\n", req->r_tid);
                        req->r_resend = true;
                        continue;
                }
                if (req->r_osd == NULL) {
                        dout("tid %llu maps to no valid osd\n", req->r_tid);
                        needmap++;  /* request a newer map */
                        continue;
                }

kick:
                /* NOTE(review): on the r_resend path above, r_osd may be
                 * NULL here, which would oops in this dout — verify. */
                dout("kicking %p tid %llu osd%d\n", req, req->r_tid,
                     req->r_osd->o_osd);
                req->r_flags |= CEPH_OSD_FLAG_RETRY;
                err = __send_request(osdc, req);
                if (err) {
                        dout(" setting r_resend on %llu\n", req->r_tid);
                        req->r_resend = true;
                }
        }
        mutex_unlock(&osdc->request_mutex);

        if (needmap) {
                dout("%d requests for down osds, need new map\n", needmap);
                ceph_monc_request_next_osdmap(&osdc->client->monc);
        }
}
865
866 /*
867  * Process updated osd map.
868  *
869  * The message contains any number of incremental and full maps, normally
870  * indicating some sort of topology change in the cluster.  Kick requests
871  * off to different OSDs as needed.
872  */
873 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
874 {
875         void *p, *end, *next;
876         u32 nr_maps, maplen;
877         u32 epoch;
878         struct ceph_osdmap *newmap = NULL, *oldmap;
879         int err;
880         struct ceph_fsid fsid;
881
882         dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
883         p = msg->front.iov_base;
884         end = p + msg->front.iov_len;
885
886         /* verify fsid */
887         ceph_decode_need(&p, end, sizeof(fsid), bad);
888         ceph_decode_copy(&p, &fsid, sizeof(fsid));
889         if (ceph_check_fsid(osdc->client, &fsid) < 0)
890                 return;
891
892         down_write(&osdc->map_sem);
893
894         /* incremental maps */
895         ceph_decode_32_safe(&p, end, nr_maps, bad);
896         dout(" %d inc maps\n", nr_maps);
897         while (nr_maps > 0) {
898                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
899                 epoch = ceph_decode_32(&p);
900                 maplen = ceph_decode_32(&p);
901                 ceph_decode_need(&p, end, maplen, bad);
902                 next = p + maplen;
903                 if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
904                         dout("applying incremental map %u len %d\n",
905                              epoch, maplen);
906                         newmap = osdmap_apply_incremental(&p, next,
907                                                           osdc->osdmap,
908                                                           osdc->client->msgr);
909                         if (IS_ERR(newmap)) {
910                                 err = PTR_ERR(newmap);
911                                 goto bad;
912                         }
913                         BUG_ON(!newmap);
914                         if (newmap != osdc->osdmap) {
915                                 ceph_osdmap_destroy(osdc->osdmap);
916                                 osdc->osdmap = newmap;
917                         }
918                 } else {
919                         dout("ignoring incremental map %u len %d\n",
920                              epoch, maplen);
921                 }
922                 p = next;
923                 nr_maps--;
924         }
925         if (newmap)
926                 goto done;
927
928         /* full maps */
929         ceph_decode_32_safe(&p, end, nr_maps, bad);
930         dout(" %d full maps\n", nr_maps);
931         while (nr_maps) {
932                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
933                 epoch = ceph_decode_32(&p);
934                 maplen = ceph_decode_32(&p);
935                 ceph_decode_need(&p, end, maplen, bad);
936                 if (nr_maps > 1) {
937                         dout("skipping non-latest full map %u len %d\n",
938                              epoch, maplen);
939                 } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
940                         dout("skipping full map %u len %d, "
941                              "older than our %u\n", epoch, maplen,
942                              osdc->osdmap->epoch);
943                 } else {
944                         dout("taking full map %u len %d\n", epoch, maplen);
945                         newmap = osdmap_decode(&p, p+maplen);
946                         if (IS_ERR(newmap)) {
947                                 err = PTR_ERR(newmap);
948                                 goto bad;
949                         }
950                         BUG_ON(!newmap);
951                         oldmap = osdc->osdmap;
952                         osdc->osdmap = newmap;
953                         if (oldmap)
954                                 ceph_osdmap_destroy(oldmap);
955                 }
956                 p += maplen;
957                 nr_maps--;
958         }
959
960 done:
961         downgrade_write(&osdc->map_sem);
962         ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
963         if (newmap)
964                 kick_requests(osdc, NULL);
965         up_read(&osdc->map_sem);
966         return;
967
968 bad:
969         pr_err("osdc handle_map corrupt msg\n");
970         ceph_msg_dump(msg);
971         up_write(&osdc->map_sem);
972         return;
973 }
974
975
976 /*
977  * A read request prepares specific pages that data is to be read into.
978  * When a message is being read off the wire, we call prepare_pages to
979  * find those pages.
980  *  0 = success, -1 failure.
981  */
982 static int prepare_pages(struct ceph_connection *con, struct ceph_msg *m,
983                          int want)
984 {
985         struct ceph_osd *osd = con->private;
986         struct ceph_osd_client *osdc;
987         struct ceph_osd_reply_head *rhead = m->front.iov_base;
988         struct ceph_osd_request *req;
989         u64 tid;
990         int ret = -1;
991         int type = le16_to_cpu(m->hdr.type);
992
993         if (!osd)
994                 return -1;
995         osdc = osd->o_osdc;
996
997         dout("prepare_pages on msg %p want %d\n", m, want);
998         if (unlikely(type != CEPH_MSG_OSD_OPREPLY))
999                 return -1;  /* hmm! */
1000
1001         tid = le64_to_cpu(rhead->tid);
1002         mutex_lock(&osdc->request_mutex);
1003         req = __lookup_request(osdc, tid);
1004         if (!req) {
1005                 dout("prepare_pages unknown tid %llu\n", tid);
1006                 goto out;
1007         }
1008         dout("prepare_pages tid %llu has %d pages, want %d\n",
1009              tid, req->r_num_pages, want);
1010         if (likely(req->r_num_pages >= want && !req->r_prepared_pages)) {
1011                 m->pages = req->r_pages;
1012                 m->nr_pages = req->r_num_pages;
1013                 req->r_reply = m;  /* only for duration of read over socket */
1014                 ceph_msg_get(m);
1015                 req->r_prepared_pages = 1;
1016                 ret = 0; /* success */
1017         }
1018 out:
1019         mutex_unlock(&osdc->request_mutex);
1020         return ret;
1021 }
1022
1023 /*
1024  * Register request, send initial attempt.
1025  */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
                            struct ceph_osd_request *req,
                            bool nofail)
{
        int rc = 0;

        /* attach the page vector to the outgoing request message */
        req->r_request->pages = req->r_pages;
        req->r_request->nr_pages = req->r_num_pages;

        register_request(osdc, req);

        down_read(&osdc->map_sem);
        mutex_lock(&osdc->request_mutex);
        /*
         * a racing kick_requests() may have sent the message for us
         * while we dropped request_mutex above, so only send now if
         * the request still hasn't been touched yet.
         */
        if (req->r_sent == 0) {
                rc = __send_request(osdc, req);
                if (rc) {
                        if (nofail) {
                                /* caller cannot tolerate failure: mark for
                                 * resend and report success */
                                dout("osdc_start_request failed send, "
                                     " marking %lld\n", req->r_tid);
                                req->r_resend = true;
                                rc = 0;
                        } else {
                                __unregister_request(osdc, req);
                        }
                }
        }
        mutex_unlock(&osdc->request_mutex);
        up_read(&osdc->map_sem);
        return rc;
}
1061
1062 /*
1063  * wait for a request to complete
1064  */
1065 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
1066                            struct ceph_osd_request *req)
1067 {
1068         int rc;
1069
1070         rc = wait_for_completion_interruptible(&req->r_completion);
1071         if (rc < 0) {
1072                 mutex_lock(&osdc->request_mutex);
1073                 __cancel_request(req);
1074                 __unregister_request(osdc, req);
1075                 mutex_unlock(&osdc->request_mutex);
1076                 dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
1077                 return rc;
1078         }
1079
1080         dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
1081         return req->r_result;
1082 }
1083
1084 /*
1085  * sync - wait for all in-flight requests to flush.  avoid starvation.
1086  */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
        struct ceph_osd_request *req;
        u64 last_tid, next_tid = 0;

        mutex_lock(&osdc->request_mutex);
        /* snapshot last_tid: only wait for requests issued before now,
         * so new writers can't starve us */
        last_tid = osdc->last_tid;
        while (1) {
                req = __lookup_request_ge(osdc, next_tid);
                if (!req)
                        break;
                if (req->r_tid > last_tid)
                        break;          /* issued after our snapshot; done */

                next_tid = req->r_tid + 1;
                /* only writes need to be flushed */
                if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
                        continue;

                /* hold a ref so req survives while we drop the mutex */
                ceph_osdc_get_request(req);
                mutex_unlock(&osdc->request_mutex);
                dout("sync waiting on tid %llu (last is %llu)\n",
                     req->r_tid, last_tid);
                wait_for_completion(&req->r_safe_completion);
                mutex_lock(&osdc->request_mutex);
                ceph_osdc_put_request(req);
        }
        mutex_unlock(&osdc->request_mutex);
        dout("sync done (thru tid %llu)\n", last_tid);
}
1116
1117 /*
1118  * init, shutdown
1119  */
1120 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
1121 {
1122         int err;
1123
1124         dout("init\n");
1125         osdc->client = client;
1126         osdc->osdmap = NULL;
1127         init_rwsem(&osdc->map_sem);
1128         init_completion(&osdc->map_waiters);
1129         osdc->last_requested_map = 0;
1130         mutex_init(&osdc->request_mutex);
1131         osdc->timeout_tid = 0;
1132         osdc->last_tid = 0;
1133         osdc->osds = RB_ROOT;
1134         osdc->requests = RB_ROOT;
1135         osdc->num_requests = 0;
1136         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
1137
1138         err = -ENOMEM;
1139         osdc->req_mempool = mempool_create_kmalloc_pool(10,
1140                                         sizeof(struct ceph_osd_request));
1141         if (!osdc->req_mempool)
1142                 goto out;
1143
1144         err = ceph_msgpool_init(&osdc->msgpool_op, 4096, 10, true);
1145         if (err < 0)
1146                 goto out_mempool;
1147         err = ceph_msgpool_init(&osdc->msgpool_op_reply, 512, 0, false);
1148         if (err < 0)
1149                 goto out_msgpool;
1150         return 0;
1151
1152 out_msgpool:
1153         ceph_msgpool_destroy(&osdc->msgpool_op);
1154 out_mempool:
1155         mempool_destroy(osdc->req_mempool);
1156 out:
1157         return err;
1158 }
1159
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
        /* stop the timeout worker before freeing the state it touches */
        cancel_delayed_work_sync(&osdc->timeout_work);
        if (osdc->osdmap) {
                ceph_osdmap_destroy(osdc->osdmap);
                osdc->osdmap = NULL;
        }
        mempool_destroy(osdc->req_mempool);
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
1171
1172 /*
1173  * Read some contiguous pages.  If we cross a stripe boundary, shorten
1174  * *plen.  Return number of bytes read, or error.
1175  */
1176 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
1177                         struct ceph_vino vino, struct ceph_file_layout *layout,
1178                         u64 off, u64 *plen,
1179                         u32 truncate_seq, u64 truncate_size,
1180                         struct page **pages, int num_pages)
1181 {
1182         struct ceph_osd_request *req;
1183         int rc = 0;
1184
1185         dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
1186              vino.snap, off, *plen);
1187         req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
1188                                     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
1189                                     NULL, 0, truncate_seq, truncate_size, NULL,
1190                                     false, 1);
1191         if (IS_ERR(req))
1192                 return PTR_ERR(req);
1193
1194         /* it may be a short read due to an object boundary */
1195         req->r_pages = pages;
1196         num_pages = calc_pages_for(off, *plen);
1197         req->r_num_pages = num_pages;
1198
1199         dout("readpages  final extent is %llu~%llu (%d pages)\n",
1200              off, *plen, req->r_num_pages);
1201
1202         rc = ceph_osdc_start_request(osdc, req, false);
1203         if (!rc)
1204                 rc = ceph_osdc_wait_request(osdc, req);
1205
1206         ceph_osdc_put_request(req);
1207         dout("readpages result %d\n", rc);
1208         return rc;
1209 }
1210
1211 /*
1212  * do a synchronous write on N pages
1213  */
1214 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
1215                          struct ceph_file_layout *layout,
1216                          struct ceph_snap_context *snapc,
1217                          u64 off, u64 len,
1218                          u32 truncate_seq, u64 truncate_size,
1219                          struct timespec *mtime,
1220                          struct page **pages, int num_pages,
1221                          int flags, int do_sync, bool nofail)
1222 {
1223         struct ceph_osd_request *req;
1224         int rc = 0;
1225
1226         BUG_ON(vino.snap != CEPH_NOSNAP);
1227         req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
1228                                     CEPH_OSD_OP_WRITE,
1229                                     flags | CEPH_OSD_FLAG_ONDISK |
1230                                             CEPH_OSD_FLAG_WRITE,
1231                                     snapc, do_sync,
1232                                     truncate_seq, truncate_size, mtime,
1233                                     nofail, 1);
1234         if (IS_ERR(req))
1235                 return PTR_ERR(req);
1236
1237         /* it may be a short write due to an object boundary */
1238         req->r_pages = pages;
1239         req->r_num_pages = calc_pages_for(off, len);
1240         dout("writepages %llu~%llu (%d pages)\n", off, len,
1241              req->r_num_pages);
1242
1243         rc = ceph_osdc_start_request(osdc, req, nofail);
1244         if (!rc)
1245                 rc = ceph_osdc_wait_request(osdc, req);
1246
1247         ceph_osdc_put_request(req);
1248         if (rc == 0)
1249                 rc = len;
1250         dout("writepages result %d\n", rc);
1251         return rc;
1252 }
1253
1254 /*
1255  * handle incoming message
1256  */
1257 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
1258 {
1259         struct ceph_osd *osd = con->private;
1260         struct ceph_osd_client *osdc;
1261         int type = le16_to_cpu(msg->hdr.type);
1262
1263         if (!osd)
1264                 return;
1265         osdc = osd->o_osdc;
1266
1267         switch (type) {
1268         case CEPH_MSG_OSD_MAP:
1269                 ceph_osdc_handle_map(osdc, msg);
1270                 break;
1271         case CEPH_MSG_OSD_OPREPLY:
1272                 handle_reply(osdc, msg);
1273                 break;
1274
1275         default:
1276                 pr_err("received unknown message type %d %s\n", type,
1277                        ceph_msg_type_name(type));
1278         }
1279         ceph_msg_put(msg);
1280 }
1281
1282 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
1283                                   struct ceph_msg_header *hdr)
1284 {
1285         struct ceph_osd *osd = con->private;
1286         struct ceph_osd_client *osdc = osd->o_osdc;
1287         int type = le16_to_cpu(hdr->type);
1288         int front = le32_to_cpu(hdr->front_len);
1289
1290         switch (type) {
1291         case CEPH_MSG_OSD_OPREPLY:
1292                 return ceph_msgpool_get(&osdc->msgpool_op_reply, front);
1293         }
1294         return ceph_alloc_msg(con, hdr);
1295 }
1296
1297 /*
1298  * Wrappers to refcount containing ceph_osd struct
1299  */
1300 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
1301 {
1302         struct ceph_osd *osd = con->private;
1303         if (get_osd(osd))
1304                 return con;
1305         return NULL;
1306 }
1307
1308 static void put_osd_con(struct ceph_connection *con)
1309 {
1310         struct ceph_osd *osd = con->private;
1311         put_osd(osd);
1312 }
1313
1314 /*
1315  * authentication
1316  */
1317 static int get_authorizer(struct ceph_connection *con,
1318                           void **buf, int *len, int *proto,
1319                           void **reply_buf, int *reply_len, int force_new)
1320 {
1321         struct ceph_osd *o = con->private;
1322         struct ceph_osd_client *osdc = o->o_osdc;
1323         struct ceph_auth_client *ac = osdc->client->monc.auth;
1324         int ret = 0;
1325
1326         if (force_new && o->o_authorizer) {
1327                 ac->ops->destroy_authorizer(ac, o->o_authorizer);
1328                 o->o_authorizer = NULL;
1329         }
1330         if (o->o_authorizer == NULL) {
1331                 ret = ac->ops->create_authorizer(
1332                         ac, CEPH_ENTITY_TYPE_OSD,
1333                         &o->o_authorizer,
1334                         &o->o_authorizer_buf,
1335                         &o->o_authorizer_buf_len,
1336                         &o->o_authorizer_reply_buf,
1337                         &o->o_authorizer_reply_buf_len);
1338                 if (ret)
1339                 return ret;
1340         }
1341
1342         *proto = ac->protocol;
1343         *buf = o->o_authorizer_buf;
1344         *len = o->o_authorizer_buf_len;
1345         *reply_buf = o->o_authorizer_reply_buf;
1346         *reply_len = o->o_authorizer_reply_buf_len;
1347         return 0;
1348 }
1349
1350
1351 static int verify_authorizer_reply(struct ceph_connection *con, int len)
1352 {
1353         struct ceph_osd *o = con->private;
1354         struct ceph_osd_client *osdc = o->o_osdc;
1355         struct ceph_auth_client *ac = osdc->client->monc.auth;
1356
1357         return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
1358 }
1359
1360
1361 const static struct ceph_connection_operations osd_con_ops = {
1362         .get = get_osd_con,
1363         .put = put_osd_con,
1364         .dispatch = dispatch,
1365         .get_authorizer = get_authorizer,
1366         .verify_authorizer_reply = verify_authorizer_reply,
1367         .alloc_msg = alloc_msg,
1368         .fault = osd_reset,
1369         .alloc_middle = ceph_alloc_middle,
1370         .prepare_pages = prepare_pages,
1371 };