#include "ceph_debug.h"

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "super.h"
#include "osd_client.h"
#include "messenger.h"
#include "decode.h"

static const struct ceph_connection_operations osd_con_ops;

static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * changes, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
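
/*
 * Locking: map_sem is held for read across request submission and
 * kicking, and for write while a new osdmap is applied;
 * request_mutex protects the request and osd rbtrees and per-request
 * state.  (See the "Caller should hold ..." notes on the helpers
 * below.)
 */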

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static void calc_layout(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
	struct ceph_osd_op *op = (void *)(reqhead + 1);
	u64 orig_len = *plen;
	u64 objoff, objlen;    /* extent in object */
	u64 bno;

	reqhead->snapid = cpu_to_le64(vino.snap);

	ceph_calc_file_object_mapping(layout, off, plen, &bno,
				      &objoff, &objlen);
	if (*plen < orig_len)
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);

	sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
	req->r_oid_len = strlen(req->r_oid);

	op->extent.offset = cpu_to_le64(objoff);
	op->extent.length = cpu_to_le64(objlen);
	req->r_num_pages = calc_pages_for(off, *plen);

	dout("calc_layout %s (%d) %llu~%llu (%d pages)\n",
	     req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages);
}

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	dout("osdc put_request %p %d -> %d\n", req, atomic_read(&req->r_ref),
	     atomic_read(&req->r_ref)-1);
	BUG_ON(atomic_read(&req->r_ref) <= 0);
	if (atomic_dec_and_test(&req->r_ref)) {
		if (req->r_request)
			ceph_msg_put(req->r_request);
		if (req->r_reply)
			ceph_msg_put(req->r_reply);
		if (req->r_own_pages)
			ceph_release_page_vector(req->r_pages,
						 req->r_num_pages);
		ceph_put_snap_context(req->r_snapc);
		if (req->r_mempool)
			mempool_free(req, req->r_osdc->req_mempool);
		else
			kfree(req);
	}
}
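
/*
 * Requests are refcounted via r_ref: register_request takes a ref on
 * behalf of the rbtree (dropped again in __unregister_request), and
 * each caller holds its own ref until it is done with the request.
 */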

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       int do_sync,
					       u32 truncate_seq,
					       u64 truncate_size,
					       struct timespec *mtime,
					       bool use_mempool, int num_reply)
{
	struct ceph_osd_request *req;
	struct ceph_msg *msg;
	struct ceph_osd_request_head *head;
	struct ceph_osd_op *op;
	void *p;
	int do_trunc = truncate_seq && (off + *plen > truncate_size);
	int num_op = 1 + do_sync + do_trunc;
	size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
	int err, i;
	u64 prevofs;

	if (use_mempool) {
		req = mempool_alloc(osdc->req_mempool, GFP_NOFS);
		memset(req, 0, sizeof(*req));
	} else {
		req = kzalloc(sizeof(*req), GFP_NOFS);
	}
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	err = ceph_msgpool_resv(&osdc->msgpool_op_reply, num_reply);
	if (err) {
		ceph_osdc_put_request(req);
		return ERR_PTR(-ENOMEM);
	}

	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	atomic_set(&req->r_ref, 1);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	req->r_flags = flags;

	WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);

	/* create message; allow space for oid */
	msg_size += 40;
	if (snapc)
		msg_size += sizeof(u64) * snapc->num_snaps;
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL);
	if (IS_ERR(msg)) {
		/* release the reply reservation taken above */
		ceph_msgpool_resv(&osdc->msgpool_op_reply, -num_reply);
		ceph_osdc_put_request(req);
		return ERR_PTR(PTR_ERR(msg));
	}
	msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
	memset(msg->front.iov_base, 0, msg->front.iov_len);
	head = msg->front.iov_base;
	op = (void *)(head + 1);
	p = (void *)(op + num_op);

	req->r_request = msg;
	req->r_snapc = ceph_get_snap_context(snapc);

	head->client_inc = cpu_to_le32(1); /* always, for now. */
	head->flags = cpu_to_le32(flags);
	if (flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(&head->mtime, mtime);
	head->num_ops = cpu_to_le16(num_op);
	op->op = cpu_to_le16(opcode);

	/* calculate max write size */
	calc_layout(osdc, vino, layout, off, plen, req);
	req->r_file_layout = *layout; /* keep a copy */

	if (flags & CEPH_OSD_FLAG_WRITE) {
		req->r_request->hdr.data_off = cpu_to_le16(off);
		req->r_request->hdr.data_len = cpu_to_le32(*plen);
		op->payload_len = cpu_to_le32(*plen);
	}

	/* fill in oid */
	head->object_len = cpu_to_le32(req->r_oid_len);
	memcpy(p, req->r_oid, req->r_oid_len);
	p += req->r_oid_len;

	if (do_trunc) {
		op++;
		op->op = cpu_to_le16(opcode == CEPH_OSD_OP_READ ?
			     CEPH_OSD_OP_MASKTRUNC : CEPH_OSD_OP_SETTRUNC);
		op->trunc.truncate_seq = cpu_to_le32(truncate_seq);
		prevofs = le64_to_cpu((op-1)->extent.offset);
		op->trunc.truncate_size = cpu_to_le64(truncate_size -
						      (off - prevofs));
	}
	if (do_sync) {
		op++;
		op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC);
	}
	if (snapc) {
		head->snap_seq = cpu_to_le64(snapc->seq);
		head->num_snaps = cpu_to_le32(snapc->num_snaps);
		for (i = 0; i < snapc->num_snaps; i++) {
			put_unaligned_le64(snapc->snaps[i], p);
			p += sizeof(u64);
		}
	}

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	return req;
}
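
/*
 * Typical call sequence (see ceph_osdc_readpages/writepages below):
 * ceph_osdc_new_request, ceph_osdc_start_request,
 * ceph_osdc_wait_request, and finally ceph_osdc_put_request.
 */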

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *new)
{
	struct rb_node **p = &osdc->requests.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_osd_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
						 u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}
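
/*
 * Return the request with the lowest tid that is >= the given tid;
 * used by ceph_osdc_sync to walk in-flight requests in tid order.
 */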
static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
		    u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid) {
			if (!n->rb_left)
				return req;
			n = n->rb_left;
		} else if (tid > req->r_tid) {
			n = n->rb_right;
		} else {
			return req;
		}
	}
	return NULL;
}

/*
 * The messaging layer will reconnect to the osd as needed.  If the
 * session has dropped, the OSD will have dropped the session state,
 * and we'll get notified by the messaging layer.  If that happens, we
 * need to resubmit all requests for that osd.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	osd->o_incarnation++;
	down_read(&osdc->map_sem);
	kick_requests(osdc, osd);
	up_read(&osdc->map_sem);
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd;

	osd = kzalloc(sizeof(*osd), GFP_NOFS);
	if (!osd)
		return NULL;

	atomic_set(&osd->o_ref, 1);
	osd->o_osdc = osdc;
	INIT_LIST_HEAD(&osd->o_requests);
	osd->o_incarnation = 1;

	ceph_con_init(osdc->client->msgr, &osd->o_con);
	osd->o_con.private = osd;
	osd->o_con.ops = &osd_con_ops;
	osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;
	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref)) {
		ceph_con_shutdown(&osd->o_con);
		kfree(osd);
	}
}

/*
 * remove an osd from our map
 */
static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	dout("remove_osd %p\n", osd);
	BUG_ON(!list_empty(&osd->o_requests));
	rb_erase(&osd->o_node, &osdc->osds);
	ceph_con_close(&osd->o_con);
	put_osd(osd);
}

/*
 * reset osd connect
 */
static int reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	int ret = 0;

	dout("reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests)) {
		remove_osd(osdc, osd);
		ret = -ENODEV;
	} else {
		ceph_con_close(&osd->o_con);
		ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
		osd->o_incarnation++;
	}
	return ret;
}

static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
	struct rb_node **p = &osdc->osds.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
		if (new->o_osd < osd->o_osd)
			p = &(*p)->rb_left;
		else if (new->o_osd > osd->o_osd)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->o_node, parent, p);
	rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
	struct ceph_osd *osd;
	struct rb_node *n = osdc->osds.rb_node;

	while (n) {
		osd = rb_entry(n, struct ceph_osd, o_node);
		if (o < osd->o_osd)
			n = n->rb_left;
		else if (o > osd->o_osd)
			n = n->rb_right;
		else
			return osd;
	}
	return NULL;
}

/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void register_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *head = req->r_request->front.iov_base;

	mutex_lock(&osdc->request_mutex);
	req->r_tid = ++osdc->last_tid;
	head->tid = cpu_to_le64(req->r_tid);

	dout("register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);
	osdc->num_requests++;

	req->r_timeout_stamp =
		jiffies + osdc->client->mount_args.osd_timeout*HZ;

	if (osdc->num_requests == 1) {
		osdc->timeout_tid = req->r_tid;
		dout("  timeout on tid %llu at %lu\n", req->r_tid,
		     req->r_timeout_stamp);
		schedule_delayed_work(&osdc->timeout_work,
		      round_jiffies_relative(req->r_timeout_stamp - jiffies));
	}
	mutex_unlock(&osdc->request_mutex);
}

/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight. */
		ceph_con_revoke(&req->r_osd->o_con, req->r_request);

		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests))
			remove_osd(osdc, req->r_osd);
		req->r_osd = NULL;
	}

	ceph_osdc_put_request(req);

	if (req->r_tid == osdc->timeout_tid) {
		if (osdc->num_requests == 0) {
			dout("no requests, canceling timeout\n");
			osdc->timeout_tid = 0;
			cancel_delayed_work(&osdc->timeout_work);
		} else {
			req = rb_entry(rb_first(&osdc->requests),
				       struct ceph_osd_request, r_node);
			osdc->timeout_tid = req->r_tid;
			dout("rescheduled timeout on tid %llu at %lu\n",
			     req->r_tid, req->r_timeout_stamp);
			schedule_delayed_work(&osdc->timeout_work,
			      round_jiffies_relative(req->r_timeout_stamp -
						     jiffies));
		}
	}
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent) {
		ceph_con_revoke(&req->r_osd->o_con, req->r_request);
		req->r_sent = 0;
	}
}
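
/*
 * Note: ceph_con_revoke only helps if the message is still queued on
 * the connection; clearing r_sent ensures a later __send_request will
 * retransmit in any case.
 */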

/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_osds(struct ceph_osd_client *osdc,
		      struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
	union ceph_pg pgid;
	int o = -1;
	int err;
	struct ceph_osd *newosd = NULL;

	dout("map_osds %p tid %lld\n", req, req->r_tid);
	err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
				      &req->r_file_layout, osdc->osdmap);
	if (err)
		return err;
	pgid.pg64 = le64_to_cpu(reqhead->layout.ol_pgid);
	o = ceph_calc_pg_primary(osdc->osdmap, pgid);

	if ((req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_osds tid %llu pgid %llx pool %d osd%d (was osd%d)\n",
	     req->r_tid, pgid.pg64, pgid.pg.pool, o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests)) {
			/* try to re-use r_osd if possible */
			newosd = get_osd(req->r_osd);
			remove_osd(osdc, newosd);
		}
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		if (newosd) {
			req->r_osd = newosd;
			newosd = NULL;
		} else {
			err = -ENOMEM;
			req->r_osd = create_osd(osdc);
			if (!req->r_osd)
				goto out;
		}

		dout("map_osds osd %p is osd%d\n", req->r_osd, o);
		req->r_osd->o_osd = o;
		req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd)
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
	err = 1;   /* osd changed */

out:
	if (newosd)
		put_osd(newosd);
	return err;
}

/*
 * caller should hold map_sem (for read) and request_mutex
 */
static int __send_request(struct ceph_osd_client *osdc,
			  struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead;
	int err;

	err = __map_osds(osdc, req);
	if (err < 0)
		return err;
	if (req->r_osd == NULL) {
		dout("send_request %p no up osds in pg\n", req);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
		return 0;
	}

	dout("send_request %p tid %llu to osd%d flags %d\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags);

	reqhead = req->r_request->front.iov_base;
	reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
	reqhead->flags |= cpu_to_le32(req->r_flags);  /* e.g., RETRY */
	reqhead->reassert_version = req->r_reassert_version;

	req->r_timeout_stamp = jiffies+osdc->client->mount_args.osd_timeout*HZ;

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	req->r_sent = req->r_osd->o_incarnation;
	return 0;
}
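
/*
 * r_sent records the osd's incarnation at send time; __map_osds
 * treats r_sent < o_incarnation as "needs resend" after a connection
 * reset.
 */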

/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests have been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req;
	struct ceph_osd *osd;
	unsigned long timeout = osdc->client->mount_args.osd_timeout * HZ;
	unsigned long next_timeout = timeout + jiffies;
	struct rb_node *p;
	int err;

	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);
	for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_osd_request, r_node);
		if (req->r_resend) {
			dout("osdc resending prev failed %lld\n", req->r_tid);
			err = __send_request(osdc, req);
			if (err)
				dout("osdc failed again on %lld\n", req->r_tid);
			else
				req->r_resend = false;
		}
	}

	for (p = rb_first(&osdc->osds); p; p = rb_next(p)) {
		osd = rb_entry(p, struct ceph_osd, o_node);
		if (list_empty(&osd->o_requests))
			continue;
		req = list_first_entry(&osd->o_requests,
				       struct ceph_osd_request, r_osd_item);
		if (time_before(jiffies, req->r_timeout_stamp))
			continue;

		dout(" tid %llu (at least) timed out on osd%d\n",
		     req->r_tid, osd->o_osd);
		req->r_timeout_stamp = next_timeout;
		ceph_con_keepalive(&osd->o_con);
	}

	if (osdc->timeout_tid)
		schedule_delayed_work(&osdc->timeout_work,
				      round_jiffies_relative(timeout));

	mutex_unlock(&osdc->request_mutex);

	up_read(&osdc->map_sem);
}

/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	struct ceph_osd_reply_head *rhead = msg->front.iov_base;
	struct ceph_osd_request *req;
	u64 tid;
	int numops, object_len, flags;

	if (msg->front.iov_len < sizeof(*rhead))
		goto bad;
	tid = le64_to_cpu(rhead->tid);
	numops = le32_to_cpu(rhead->num_ops);
	object_len = le32_to_cpu(rhead->object_len);
	if (msg->front.iov_len != sizeof(*rhead) + object_len +
	    numops * sizeof(struct ceph_osd_op))
		goto bad;
	dout("handle_reply %p tid %llu\n", msg, tid);

	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		dout("handle_reply tid %llu dne\n", tid);
		mutex_unlock(&osdc->request_mutex);
		return;
	}
	ceph_osdc_get_request(req);
	flags = le32_to_cpu(rhead->flags);

	if (req->r_reply) {
		/*
		 * once we see the message has been received, we don't
		 * need a ref (which is only needed for revoking
		 * pages)
		 */
		ceph_msg_put(req->r_reply);
		req->r_reply = NULL;
	}

	if (!req->r_got_reply) {
		unsigned bytes;

		req->r_result = le32_to_cpu(rhead->result);
		bytes = le32_to_cpu(msg->hdr.data_len);
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		if (req->r_result == 0)
			req->r_result = bytes;

		/* in case this is a write and we need to replay, */
		req->r_reassert_version = rhead->reassert_version;

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		dout("handle_reply tid %llu dup ack\n", tid);
		mutex_unlock(&osdc->request_mutex);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	/* either this is a read, or we got the safe response */
	if ((flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	mutex_unlock(&osdc->request_mutex);

	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK) {
		if (req->r_safe_callback)
			req->r_safe_callback(req, msg);
		complete(&req->r_safe_completion);  /* fsync waiter */
	}

done:
	ceph_osdc_put_request(req);
	return;

bad:
	pr_err("corrupt osd_op_reply got %d %d expected %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
	       (int)sizeof(*rhead));
}
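
/*
 * A write may be acknowledged twice: once when it has been applied
 * and again with CEPH_OSD_FLAG_ONDISK when it has been committed;
 * only the latter completes r_safe_completion.
 */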

/*
 * Resubmit osd requests whose osd or osd address has changed.  Request
 * a new osd map if osds are down, or we are otherwise unable to determine
 * how to direct a request.
 *
 * Close connections to down osds.
 *
 * If @kickosd is specified, resubmit requests for that specific osd.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static void kick_requests(struct ceph_osd_client *osdc,
			  struct ceph_osd *kickosd)
{
	struct ceph_osd_request *req;
	struct rb_node *p, *n;
	int needmap = 0;
	int err;

	dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1);
	mutex_lock(&osdc->request_mutex);

	for (p = rb_first(&osdc->osds); p; p = n) {
		struct ceph_osd *osd =
			rb_entry(p, struct ceph_osd, o_node);

		n = rb_next(p);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    !ceph_entity_addr_equal(&osd->o_con.peer_addr,
					    ceph_osd_addr(osdc->osdmap,
							  osd->o_osd)))
			reset_osd(osdc, osd);
	}

	for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_osd_request, r_node);

		if (req->r_resend) {
			dout(" r_resend set on tid %llu\n", req->r_tid);
			goto kick;
		}
		if (req->r_osd && kickosd == req->r_osd)
			goto kick;

		err = __map_osds(osdc, req);
		if (err == 0)
			continue;  /* no change */
		if (err < 0) {
			/*
			 * FIXME: really, we should set the request
			 * error and fail if this isn't a 'nofail'
			 * request, but that's a fair bit more
			 * complicated to do.  So retry!
			 */
			dout(" setting r_resend on %llu\n", req->r_tid);
			req->r_resend = true;
			continue;
		}
		if (req->r_osd == NULL) {
			dout("tid %llu maps to no valid osd\n", req->r_tid);
			needmap++;  /* request a newer map */
			continue;
		}

kick:
		dout("kicking %p tid %llu osd%d\n", req, req->r_tid,
		     req->r_osd ? req->r_osd->o_osd : -1);
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
		err = __send_request(osdc, req);
		if (err) {
			dout(" setting r_resend on %llu\n", req->r_tid);
			req->r_resend = true;
		}
	}
	mutex_unlock(&osdc->request_mutex);

	if (needmap) {
		dout("%d requests for down osds, need new map\n", needmap);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	}
}
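
/*
 * kick_requests is invoked from osd_reset (for a single osd) and
 * from ceph_osdc_handle_map (kickosd == NULL, after a map change).
 */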

/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end, *next;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_osdmap *newmap = NULL, *oldmap;
	int err;
	struct ceph_fsid fsid;

	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_fsid_compare(&fsid, &osdc->client->monc.monmap->fsid)) {
		pr_err("got osdmap with wrong fsid, ignoring\n");
		return;
	}

	down_write(&osdc->map_sem);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		ceph_decode_32(&p, epoch);
		ceph_decode_32(&p, maplen);
		ceph_decode_need(&p, end, maplen, bad);
		next = p + maplen;
		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			newmap = osdmap_apply_incremental(&p, next,
							  osdc->osdmap,
							  osdc->client->msgr);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			if (newmap != osdc->osdmap) {
				ceph_osdmap_destroy(osdc->osdmap);
				osdc->osdmap = newmap;
			}
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p = next;
		nr_maps--;
	}
	if (newmap)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		ceph_decode_32(&p, epoch);
		ceph_decode_32(&p, maplen);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			newmap = osdmap_decode(&p, p+maplen);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			oldmap = osdc->osdmap;
			osdc->osdmap = newmap;
			if (oldmap)
				ceph_osdmap_destroy(oldmap);
		}
		p += maplen;
		nr_maps--;
	}

done:
	downgrade_write(&osdc->map_sem);
	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
	if (newmap)
		kick_requests(osdc, NULL);
	up_read(&osdc->map_sem);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	up_write(&osdc->map_sem);
	return;
}

/*
 * A read request prepares specific pages that data is to be read into.
 * When a message is being read off the wire, we call prepare_pages to
 * find those pages.
 *  0 = success, -1 failure.
 */
static int prepare_pages(struct ceph_connection *con, struct ceph_msg *m,
			 int want)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	struct ceph_osd_reply_head *rhead = m->front.iov_base;
	struct ceph_osd_request *req;
	u64 tid;
	int ret = -1;
	int type = le16_to_cpu(m->hdr.type);

	if (!osd)
		return -1;
	osdc = osd->o_osdc;

	dout("prepare_pages on msg %p want %d\n", m, want);
	if (unlikely(type != CEPH_MSG_OSD_OPREPLY))
		return -1;  /* hmm! */

	tid = le64_to_cpu(rhead->tid);
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (!req) {
		dout("prepare_pages unknown tid %llu\n", tid);
		goto out;
	}
	dout("prepare_pages tid %llu has %d pages, want %d\n",
	     tid, req->r_num_pages, want);
	if (likely(req->r_num_pages >= want && !req->r_prepared_pages)) {
		m->pages = req->r_pages;
		m->nr_pages = req->r_num_pages;
		req->r_reply = m;  /* only for duration of read over socket */
		ceph_msg_get(m);
		req->r_prepared_pages = 1;
		ret = 0; /* success */
	}
out:
	mutex_unlock(&osdc->request_mutex);
	return ret;
}

/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	int rc = 0;

	req->r_request->pages = req->r_pages;
	req->r_request->nr_pages = req->r_num_pages;

	register_request(osdc, req);

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	/*
	 * a racing kick_requests() may have sent the message for us
	 * while we dropped request_mutex above, so only send now if
	 * the request still hasn't been touched yet.
	 */
	if (req->r_sent == 0) {
		rc = __send_request(osdc, req);
		if (rc) {
			if (nofail) {
				dout("osdc_start_request failed send, "
				     " marking %lld\n", req->r_tid);
				req->r_resend = true;
				rc = 0;
			} else {
				__unregister_request(osdc, req);
			}
		}
	}
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int rc;

	rc = wait_for_completion_interruptible(&req->r_completion);
	if (rc < 0) {
		mutex_lock(&osdc->request_mutex);
		__cancel_request(req);
		mutex_unlock(&osdc->request_mutex);
		dout("wait_request tid %llu timed out\n", req->r_tid);
		return rc;
	}

	dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
	return req->r_result;
}

/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req;
	u64 last_tid, next_tid = 0;

	mutex_lock(&osdc->request_mutex);
	last_tid = osdc->last_tid;
	while (1) {
		req = __lookup_request_ge(osdc, next_tid);
		if (!req)
			break;
		if (req->r_tid > last_tid)
			break;

		next_tid = req->r_tid + 1;
		if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
			continue;

		ceph_osdc_get_request(req);
		mutex_unlock(&osdc->request_mutex);
		dout("sync waiting on tid %llu (last is %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		mutex_lock(&osdc->request_mutex);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
	dout("sync done (thru tid %llu)\n", last_tid);
}
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	osdc->osdmap = NULL;
	init_rwsem(&osdc->map_sem);
	init_completion(&osdc->map_waiters);
	osdc->last_requested_map = 0;
	mutex_init(&osdc->request_mutex);
	osdc->timeout_tid = 0;
	osdc->last_tid = 0;
	osdc->osds = RB_ROOT;
	osdc->requests = RB_ROOT;
	osdc->num_requests = 0;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);

	err = -ENOMEM;
	osdc->req_mempool = mempool_create_kmalloc_pool(10,
					sizeof(struct ceph_osd_request));
	if (!osdc->req_mempool)
		goto out;
	err = ceph_msgpool_init(&osdc->msgpool_op, 4096, 10, true);
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, 512, 0, false);
	if (err < 0)
		goto out_msgpool;
	return 0;

out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out:
	return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	cancel_delayed_work_sync(&osdc->timeout_work);
	if (osdc->osdmap) {
		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = NULL;
	}
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, truncate_seq, truncate_size, NULL,
				    false, 1);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	req->r_pages = pages;
	num_pages = calc_pages_for(off, *plen);
	req->r_num_pages = num_pages;

	dout("readpages final extent is %llu~%llu (%d pages)\n",
	     off, *plen, req->r_num_pages);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}

/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages,
			 int flags, int do_sync, bool nofail)
{
	struct ceph_osd_request *req;
	int rc = 0;

	BUG_ON(vino.snap != CEPH_NOSNAP);
	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
				    CEPH_OSD_OP_WRITE,
				    flags | CEPH_OSD_FLAG_ONDISK |
					    CEPH_OSD_FLAG_WRITE,
				    snapc, do_sync,
				    truncate_seq, truncate_size, mtime,
				    nofail, 1);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	req->r_pages = pages;
	req->r_num_pages = calc_pages_for(off, len);
	dout("writepages %llu~%llu (%d pages)\n", off, len,
	     req->r_num_pages);

	rc = ceph_osdc_start_request(osdc, req, nofail);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(hdr->type);

	switch (type) {
	case CEPH_MSG_OSD_OPREPLY:
		return ceph_msgpool_get(&osdc->msgpool_op_reply);
	}
	return ceph_alloc_msg(con, hdr);
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;

	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;

	put_osd(osd);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.alloc_msg = alloc_msg,
	.peer_reset = osd_reset,
	.alloc_middle = ceph_alloc_middle,
	.prepare_pages = prepare_pages,
};