#include "ceph_debug.h"

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>

#include "super.h"
#include "decode.h"
#include "messenger.h"
/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * with at least one MDS server.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */
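/*
 * Illustrative note (not from the original source): a cap word packs a
 * small group of generic bits (shared/excl/cache/rd/wr/buffer/lazyio)
 * for each inode sub-object at the shifts CEPH_CAP_SAUTH, CEPH_CAP_SLINK,
 * CEPH_CAP_SXATTR, and CEPH_CAP_SFILE, plus the standalone CEPH_CAP_PIN
 * bit.  ceph_cap_string() below renders that encoding for debugging;
 * e.g. PIN|AUTH_SHARED|FILE_SHARED|FILE_RD would print as "pAsFsr".
 */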
/*
 * Generate readable cap strings for debugging output.
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;

static char *gcap_string(char *s, int c)
{
	if (c & CEPH_CAP_GSHARED)
		*s++ = 's';
	if (c & CEPH_CAP_GEXCL)
		*s++ = 'x';
	if (c & CEPH_CAP_GCACHE)
		*s++ = 'c';
	if (c & CEPH_CAP_GRD)
		*s++ = 'r';
	if (c & CEPH_CAP_GWR)
		*s++ = 'w';
	if (c & CEPH_CAP_GBUFFER)
		*s++ = 'b';
	if (c & CEPH_CAP_GLAZYIO)
		*s++ = 'l';
	return s;
}
const char *ceph_cap_string(int caps)
{
	int i;
	char *s;
	int c;

	spin_lock(&cap_str_lock);
	i = last_cap_str++;
	if (last_cap_str == MAX_CAP_STR)
		last_cap_str = 0;
	spin_unlock(&cap_str_lock);

	s = cap_str[i];

	if (caps & CEPH_CAP_PIN)
		*s++ = 'p';

	c = (caps >> CEPH_CAP_SAUTH) & 3;
	if (c) {
		*s++ = 'A';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SLINK) & 3;
	if (c) {
		*s++ = 'L';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SXATTR) & 3;
	if (c) {
		*s++ = 'X';
		s = gcap_string(s, c);
	}

	c = caps >> CEPH_CAP_SFILE;
	if (c) {
		*s++ = 'F';
		s = gcap_string(s, c);
	}

	if (s == cap_str[i])
		*s++ = '-';
	*s = 0;
	return cap_str[i];
}
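/*
 * Example (illustrative, not from the original source): results
 * round-robin through the small static buffer pool above, so up to
 * MAX_CAP_STR strings may be live at once, e.g. within one dout():
 *
 *	dout("issued %s wanted %s\n",
 *	     ceph_cap_string(issued), ceph_cap_string(wanted));
 *
 * A returned string is only valid until MAX_CAP_STR further calls are
 * made; it is a debugging aid, not a stable string.
 */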
/*
 * Maintain a global pool of preallocated struct ceph_caps, referenced
 * by struct ceph_cap_reservation.  This ensures that we preallocate
 * memory needed to successfully process an MDS response.  (If an MDS
 * sends us cap information and we fail to process it, we will have
 * problems due to the client and MDS being out of sync.)
 *
 * Reservations are 'owned' by a ceph_cap_reservation context.
 */
static spinlock_t caps_list_lock;
static struct list_head caps_list;  /* unused (reserved or unreserved) */
static int caps_total_count;        /* total caps allocated */
static int caps_use_count;          /* in use */
static int caps_reserve_count;      /* unused, reserved */
static int caps_avail_count;        /* unused, unreserved */
static int caps_min_count;          /* keep at least this many (unreserved) */
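/*
 * Bookkeeping invariant (stated here for clarity; it is asserted by the
 * BUG_ON()s below whenever caps_list_lock is held):
 *
 *	caps_total_count == caps_use_count + caps_reserve_count
 *					   + caps_avail_count
 *
 * i.e. every allocated cap is exactly one of: in use, reserved for a
 * ceph_cap_reservation, or sitting unreserved on caps_list.
 */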
void __init ceph_caps_init(void)
{
	INIT_LIST_HEAD(&caps_list);
	spin_lock_init(&caps_list_lock);
}

void ceph_caps_finalize(void)
{
	struct ceph_cap *cap;

	spin_lock(&caps_list_lock);
	while (!list_empty(&caps_list)) {
		cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
		list_del(&cap->caps_item);
		kmem_cache_free(ceph_cap_cachep, cap);
	}
	caps_total_count = 0;
	caps_avail_count = 0;
	caps_use_count = 0;
	caps_reserve_count = 0;
	caps_min_count = 0;
	spin_unlock(&caps_list_lock);
}

void ceph_adjust_min_caps(int delta)
{
	spin_lock(&caps_list_lock);
	caps_min_count += delta;
	BUG_ON(caps_min_count < 0);
	spin_unlock(&caps_list_lock);
}

int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need)
{
	int i;
	struct ceph_cap *cap;
	int have;
	int alloc = 0;
	LIST_HEAD(newcaps);
	int ret = 0;

	dout("reserve caps ctx=%p need=%d\n", ctx, need);

	/* first reserve any caps that are already allocated */
	spin_lock(&caps_list_lock);
	if (caps_avail_count >= need)
		have = need;
	else
		have = caps_avail_count;
	caps_avail_count -= have;
	caps_reserve_count += have;
	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
	       caps_avail_count);
	spin_unlock(&caps_list_lock);

	for (i = have; i < need; i++) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (!cap) {
			ret = -ENOMEM;
			goto out_alloc_count;
		}
		list_add(&cap->caps_item, &newcaps);
		alloc++;
	}
	BUG_ON(have + alloc != need);

	spin_lock(&caps_list_lock);
	caps_total_count += alloc;
	caps_reserve_count += alloc;
	list_splice(&newcaps, &caps_list);

	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
	       caps_avail_count);
	spin_unlock(&caps_list_lock);

	ctx->count = need;
	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
	     ctx, caps_total_count, caps_use_count, caps_reserve_count,
	     caps_avail_count);
	return 0;

out_alloc_count:
	/* we didn't manage to reserve as much as we needed */
	pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
		   ctx, need, have + alloc);
	return ret;
}
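/*
 * Usage sketch (illustrative, not from the original source): callers
 * that must not fail partway through an MDS reply reserve caps first,
 * draw on the reservation with get_cap(), and finally return anything
 * unused:
 *
 *	struct ceph_cap_reservation rsvd;
 *
 *	if (ceph_reserve_caps(&rsvd, need) < 0)
 *		return -ENOMEM;         // fail before touching the reply
 *	...
 *	cap = get_cap(&rsvd);           // draws from the reservation
 *	...
 *	ceph_unreserve_caps(&rsvd);     // release whatever is left
 */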
int ceph_unreserve_caps(struct ceph_cap_reservation *ctx)
{
	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);

	spin_lock(&caps_list_lock);
	BUG_ON(caps_reserve_count < ctx->count);
	caps_reserve_count -= ctx->count;
	caps_avail_count += ctx->count;
	ctx->count = 0;
	dout("unreserve caps %d = %d used + %d resv + %d avail\n",
	     caps_total_count, caps_use_count, caps_reserve_count,
	     caps_avail_count);
	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
	       caps_avail_count);
	spin_unlock(&caps_list_lock);

	return 0;
}

static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx)
{
	struct ceph_cap *cap = NULL;

	/* temporary, until we do something about cap import/export */
	if (!ctx)
		return kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);

	spin_lock(&caps_list_lock);
	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
	     ctx, ctx->count, caps_total_count, caps_use_count,
	     caps_reserve_count, caps_avail_count);
	BUG_ON(!ctx->count);
	BUG_ON(ctx->count > caps_reserve_count);
	BUG_ON(list_empty(&caps_list));

	ctx->count--;
	caps_reserve_count--;
	caps_use_count++;

	cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
	list_del(&cap->caps_item);

	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
	       caps_avail_count);
	spin_unlock(&caps_list_lock);
	return cap;
}

void ceph_put_cap(struct ceph_cap *cap)
{
	spin_lock(&caps_list_lock);
	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
	     cap, caps_total_count, caps_use_count,
	     caps_reserve_count, caps_avail_count);
	caps_use_count--;
	/*
	 * Keep some preallocated caps around (ceph_min_count), to
	 * avoid lots of free/alloc churn.
	 */
	if (caps_avail_count >= caps_reserve_count + caps_min_count) {
		caps_total_count--;
		kmem_cache_free(ceph_cap_cachep, cap);
	} else {
		caps_avail_count++;
		list_add(&cap->caps_item, &caps_list);
	}

	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
	       caps_avail_count);
	spin_unlock(&caps_list_lock);
}

void ceph_reservation_status(struct ceph_client *client,
			     int *total, int *avail, int *used, int *reserved,
			     int *min)
{
	if (total)
		*total = caps_total_count;
	if (avail)
		*avail = caps_avail_count;
	if (used)
		*used = caps_use_count;
	if (reserved)
		*reserved = caps_reserve_count;
	if (min)
		*min = caps_min_count;
}
/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;
	struct rb_node *n = ci->i_caps.rb_node;

	while (n) {
		cap = rb_entry(n, struct ceph_cap, ci_node);
		if (mds < cap->mds)
			n = n->rb_left;
		else if (mds > cap->mds)
			n = n->rb_right;
		else
			return cap;
	}
	return NULL;
}

/*
 * Return id of any MDS with a cap, preferably FILE_WR|WRBUFFER|EXCL, else
 * -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci, u32 *mseq)
{
	struct ceph_cap *cap;
	int mds = -1;
	struct rb_node *p;

	/* prefer mds with WR|WRBUFFER|EXCL caps */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		mds = cap->mds;
		if (mseq)
			*mseq = cap->mseq;
		if (cap->issued & (CEPH_CAP_FILE_WR |
				   CEPH_CAP_FILE_BUFFER |
				   CEPH_CAP_FILE_EXCL))
			break;
	}
	return mds;
}

int ceph_get_cap_mds(struct inode *inode)
{
	int mds;

	spin_lock(&inode->i_lock);
	mds = __ceph_get_cap_mds(ceph_inode(inode), NULL);
	spin_unlock(&inode->i_lock);
	return mds;
}

/*
 * Called under i_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
			      struct ceph_cap *new)
{
	struct rb_node **p = &ci->i_caps.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_cap *cap = NULL;

	while (*p) {
		parent = *p;
		cap = rb_entry(parent, struct ceph_cap, ci_node);
		if (new->mds < cap->mds)
			p = &(*p)->rb_left;
		else if (new->mds > cap->mds)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->ci_node, parent, p);
	rb_insert_color(&new->ci_node, &ci->i_caps);
}
/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	struct ceph_mount_args *ma = mdsc->client->mount_args;

	ci->i_hold_caps_min = round_jiffies(jiffies +
					    ma->caps_wanted_delay_min * HZ);
	ci->i_hold_caps_max = round_jiffies(jiffies +
					    ma->caps_wanted_delay_max * HZ);
	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}
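/*
 * Illustrative numbers (assumption: the usual mount defaults of
 * caps_wanted_delay_min=5 and caps_wanted_delay_max=60 seconds): a cap
 * that falls idle is held for at least ~5s (i_hold_caps_min) before
 * "wanted" may be dropped on send, and is requeued for release no
 * later than ~60s (i_hold_caps_max) after its last use.
 */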
/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
				struct ceph_inode_info *ci)
{
	__cap_set_timeouts(mdsc, ci);
	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
	     ci->i_ceph_flags, ci->i_hold_caps_max);
	if (!mdsc->stopping) {
		spin_lock(&mdsc->cap_delay_lock);
		if (!list_empty(&ci->i_cap_delay_list)) {
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto no_change;
			list_del_init(&ci->i_cap_delay_list);
		}
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
		spin_unlock(&mdsc->cap_delay_lock);
	}
}

/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
				      struct ceph_inode_info *ci)
{
	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
	spin_lock(&mdsc->cap_delay_lock);
	ci->i_ceph_flags |= CEPH_I_FLUSH;
	if (!list_empty(&ci->i_cap_delay_list))
		list_del_init(&ci->i_cap_delay_list);
	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
	if (list_empty(&ci->i_cap_delay_list))
		return;
	spin_lock(&mdsc->cap_delay_lock);
	list_del_init(&ci->i_cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
			      unsigned issued)
{
	unsigned had = __ceph_caps_issued(ci, NULL);

	/*
	 * Each time we receive FILE_CACHE anew, we increment
	 * i_rdcache_gen.
	 */
	if ((issued & CEPH_CAP_FILE_CACHE) &&
	    (had & CEPH_CAP_FILE_CACHE) == 0)
		ci->i_rdcache_gen++;

	/*
	 * if we are newly issued FILE_SHARED, clear I_COMPLETE; we
	 * don't know what happened to this directory while we didn't
	 * have the cap.
	 */
	if ((issued & CEPH_CAP_FILE_SHARED) &&
	    (had & CEPH_CAP_FILE_SHARED) == 0) {
		ci->i_shared_gen++;
		if (S_ISDIR(ci->vfs_inode.i_mode)) {
			dout(" marking %p NOT complete\n", &ci->vfs_inode);
			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
		}
	}
}
/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
int ceph_add_cap(struct inode *inode,
		 struct ceph_mds_session *session, u64 cap_id,
		 int fmode, unsigned issued, unsigned wanted,
		 unsigned seq, unsigned mseq, u64 realmino, int flags,
		 struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *new_cap = NULL;
	struct ceph_cap *cap;
	int mds = session->s_mds;
	int actual_wanted;

	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
	     session->s_mds, cap_id, ceph_cap_string(issued), seq);

	/*
	 * If we are opening the file, include file mode wanted bits
	 * in wanted.
	 */
	if (fmode >= 0)
		wanted |= ceph_caps_for_mode(fmode);

retry:
	spin_lock(&inode->i_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		if (new_cap) {
			cap = new_cap;
			new_cap = NULL;
		} else {
			spin_unlock(&inode->i_lock);
			new_cap = get_cap(caps_reservation);
			if (new_cap == NULL)
				return -ENOMEM;
			goto retry;
		}

		cap->issued = 0;
		cap->implemented = 0;
		cap->mds = mds;
		cap->mds_wanted = 0;

		cap->ci = ci;
		__insert_cap_node(ci, cap);

		/* clear out old exporting info?  (i.e. on cap import) */
		if (ci->i_cap_exporting_mds == mds) {
			ci->i_cap_exporting_issued = 0;
			ci->i_cap_exporting_mseq = 0;
			ci->i_cap_exporting_mds = -1;
		}

		/* add to session cap list */
		cap->session = session;
		spin_lock(&session->s_cap_lock);
		list_add_tail(&cap->session_caps, &session->s_caps);
		session->s_nr_caps++;
		spin_unlock(&session->s_cap_lock);
	}

	if (!ci->i_snap_realm) {
		/*
		 * add this inode to the appropriate snap realm
		 */
		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
							       realmino);
		if (realm) {
			ceph_get_snap_realm(mdsc, realm);
			spin_lock(&realm->inodes_with_caps_lock);
			ci->i_snap_realm = realm;
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			spin_unlock(&realm->inodes_with_caps_lock);
		} else {
			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
			       realmino);
		}
	}

	__check_cap_issue(ci, cap, issued);

	/*
	 * If we are issued caps we don't want, or the mds' wanted
	 * value appears to be off, queue a check so we'll release
	 * later and/or update the mds wanted value.
	 */
	actual_wanted = __ceph_caps_wanted(ci);
	if ((wanted & ~actual_wanted) ||
	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
		     ceph_cap_string(issued), ceph_cap_string(wanted),
		     ceph_cap_string(actual_wanted));
		__cap_delay_requeue(mdsc, ci);
	}

	if (flags & CEPH_CAP_FLAG_AUTH)
		ci->i_auth_cap = cap;
	else if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
	     ceph_cap_string(issued|cap->issued), seq, mds);
	cap->cap_id = cap_id;
	cap->issued = issued;
	cap->implemented |= issued;
	cap->mds_wanted |= wanted;
	cap->seq = seq;
	cap->issue_seq = seq;
	cap->mseq = mseq;
	cap->cap_gen = session->s_cap_gen;

	if (fmode >= 0)
		__ceph_get_fmode(ci, fmode);
	spin_unlock(&inode->i_lock);
	wake_up(&ci->i_cap_wq);
	return 0;
}
/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
	unsigned long ttl;
	u32 gen;

	spin_lock(&cap->session->s_cap_lock);
	gen = cap->session->s_cap_gen;
	ttl = cap->session->s_cap_ttl;
	spin_unlock(&cap->session->s_cap_lock);

	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
		dout("__cap_is_valid %p cap %p issued %s "
		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
		return 0;
	}

	return 1;
}

/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
	int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
	struct ceph_cap *cap;
	struct rb_node *p;

	if (implemented)
		*implemented = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		dout("__ceph_caps_issued %p cap %p issued %s\n",
		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
		have |= cap->issued;
		if (implemented)
			*implemented |= cap->implemented;
	}
	return have;
}

/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap == ocap)
			continue;
		if (!__cap_is_valid(cap))
			continue;
		have |= cap->issued;
	}
	return have;
}
/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
 */
static void __touch_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *s = cap->session;

	spin_lock(&s->s_cap_lock);
	if (s->s_cap_iterator == NULL) {
		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
		     s->s_mds);
		list_move_tail(&cap->session_caps, &s->s_caps);
	} else {
		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
		     &cap->ci->vfs_inode, cap, s->s_mds);
	}
	spin_unlock(&s->s_cap_lock);
}

/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * front of their respective LRUs.  (This is the preferred way for
 * callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int have = ci->i_snap_caps;

	if ((have & mask) == mask) {
		dout("__ceph_caps_issued_mask %p snap issued %s"
		     " (mask %s)\n", &ci->vfs_inode,
		     ceph_cap_string(have),
		     ceph_cap_string(mask));
		return 1;
	}

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		if ((cap->issued & mask) == mask) {
			dout("__ceph_caps_issued_mask %p cap %p issued %s"
			     " (mask %s)\n", &ci->vfs_inode, cap,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch)
				__touch_cap(cap);
			return 1;
		}

		/* does a combination of caps satisfy mask? */
		have |= cap->issued;
		if ((have & mask) == mask) {
			dout("__ceph_caps_issued_mask %p combo issued %s"
			     " (mask %s)\n", &ci->vfs_inode,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch) {
				struct rb_node *q;

				/* touch this + preceding caps */
				__touch_cap(cap);
				for (q = rb_first(&ci->i_caps); q != p;
				     q = rb_next(q)) {
					cap = rb_entry(q, struct ceph_cap,
						       ci_node);
					if (!__cap_is_valid(cap))
						continue;
					__touch_cap(cap);
				}
			}
			return 1;
		}
	}

	return 0;
}
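/*
 * Illustrative example (not from the original source): a caller asking
 * for mask = FILE_SHARED|FILE_RD may be satisfied either by one cap
 * that covers the whole mask, or by the union of several caps from
 * different MDSs.  In the combination case, every cap scanned up to
 * and including the one that completed the mask is touched in its
 * session's LRU, since all of them contributed to the answer.
 */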
/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	struct rb_node *p;
	int ret = 0;

	spin_lock(&inode->i_lock);
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (__cap_is_valid(cap) &&
		    (cap->implemented & ~cap->issued & mask)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&inode->i_lock);
	dout("ceph_caps_revoking %p %s = %d\n", inode,
	     ceph_cap_string(mask), ret);
	return ret;
}

int __ceph_caps_used(struct ceph_inode_info *ci)
{
	int used = 0;

	if (ci->i_pin_ref)
		used |= CEPH_CAP_PIN;
	if (ci->i_rd_ref)
		used |= CEPH_CAP_FILE_RD;
	if (ci->i_rdcache_ref || ci->i_rdcache_gen)
		used |= CEPH_CAP_FILE_CACHE;
	if (ci->i_wr_ref)
		used |= CEPH_CAP_FILE_WR;
	if (ci->i_wrbuffer_ref)
		used |= CEPH_CAP_FILE_BUFFER;
	return used;
}

/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
	int want = 0;
	int mode;

	for (mode = 0; mode < 4; mode++)
		if (ci->i_nr_by_mode[mode])
			want |= ceph_caps_for_mode(mode);
	return want;
}
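/*
 * Sketch (illustrative): i_nr_by_mode[] counts opens per ceph file
 * mode (RD, WR, etc.), so an inode with one reader and one writer
 * wants the union ceph_caps_for_mode(RD) | ceph_caps_for_mode(WR);
 * the exact cap bits per mode are defined by ceph_caps_for_mode().
 */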
/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int mds_wanted = 0;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		mds_wanted |= cap->mds_wanted;
	}
	return mds_wanted;
}

/*
 * called under i_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
}

/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *session = cap->session;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_mds_client *mdsc =
		&ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	int removed = 0;

	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

	/* remove from session list */
	spin_lock(&session->s_cap_lock);
	if (session->s_cap_iterator == cap) {
		/* not yet, we are iterating over this very cap */
		dout("__ceph_remove_cap delaying %p removal from session %p\n",
		     cap, cap->session);
	} else {
		list_del_init(&cap->session_caps);
		session->s_nr_caps--;
		cap->session = NULL;
		removed = 1;
	}
	/* protect backpointer with s_cap_lock: see iterate_session_caps */
	cap->ci = NULL;
	spin_unlock(&session->s_cap_lock);

	/* remove from inode list */
	rb_erase(&cap->ci_node, &ci->i_caps);
	if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	if (removed)
		ceph_put_cap(cap);

	if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		ci->i_snap_realm_counter++;
		ci->i_snap_realm = NULL;
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}
	if (!__ceph_is_any_real_caps(ci))
		__cap_delay_cancel(mdsc, ci);
}
/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct ceph_mds_session *session,
			u64 ino, u64 cid, int op,
			int caps, int wanted, int dirty,
			u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
			u64 size, u64 max_size,
			struct timespec *mtime, struct timespec *atime,
			u64 time_warp_seq,
			uid_t uid, gid_t gid, mode_t mode,
			u64 xattr_version,
			struct ceph_buffer *xattrs_buf,
			u64 follows)
{
	struct ceph_mds_caps *fc;
	struct ceph_msg *msg;

	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
	     " seq %u/%u mseq %u follows %lld size %llu/%llu"
	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
	     cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
	     ceph_cap_string(dirty),
	     seq, issue_seq, mseq, follows, size, max_size,
	     xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc));
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->hdr.tid = cpu_to_le64(flush_tid);

	fc = msg->front.iov_base;
	memset(fc, 0, sizeof(*fc));

	fc->cap_id = cpu_to_le64(cid);
	fc->op = cpu_to_le32(op);
	fc->seq = cpu_to_le32(seq);
	fc->issue_seq = cpu_to_le32(issue_seq);
	fc->migrate_seq = cpu_to_le32(mseq);
	fc->caps = cpu_to_le32(caps);
	fc->wanted = cpu_to_le32(wanted);
	fc->dirty = cpu_to_le32(dirty);
	fc->ino = cpu_to_le64(ino);
	fc->snap_follows = cpu_to_le64(follows);

	fc->size = cpu_to_le64(size);
	fc->max_size = cpu_to_le64(max_size);
	if (mtime)
		ceph_encode_timespec(&fc->mtime, mtime);
	if (atime)
		ceph_encode_timespec(&fc->atime, atime);
	fc->time_warp_seq = cpu_to_le32(time_warp_seq);

	fc->uid = cpu_to_le32(uid);
	fc->gid = cpu_to_le32(gid);
	fc->mode = cpu_to_le32(mode);

	fc->xattr_version = cpu_to_le64(xattr_version);
	if (xattrs_buf) {
		msg->middle = ceph_buffer_get(xattrs_buf);
		fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
		msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
	}

	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * Queue cap releases when an inode is dropped from our cache.  Since
 * the inode is about to be destroyed, there is no need for i_lock.
 */
void ceph_queue_caps_release(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct rb_node *p;

	p = rb_first(&ci->i_caps);
	while (p) {
		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
		struct ceph_mds_session *session = cap->session;
		struct ceph_msg *msg;
		struct ceph_mds_cap_release *head;
		struct ceph_mds_cap_item *item;

		spin_lock(&session->s_cap_lock);
		BUG_ON(!session->s_num_cap_releases);
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);

		dout(" adding %p release to mds%d msg %p (%d left)\n",
		     inode, session->s_mds, msg, session->s_num_cap_releases);

		BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
		head = msg->front.iov_base;
		head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(ceph_ino(inode));
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);

		session->s_num_cap_releases--;

		msg->front.iov_len += sizeof(*item);
		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			dout(" release msg %p full\n", msg);
			list_move_tail(&msg->list_head,
				       &session->s_cap_releases_done);
		} else {
			dout(" release msg %p at %d/%d (%d)\n", msg,
			     (int)le32_to_cpu(head->num),
			     (int)CEPH_CAPS_PER_RELEASE,
			     (int)msg->front.iov_len);
		}
		spin_unlock(&session->s_cap_lock);
		p = rb_next(p);
		__ceph_remove_cap(cap);
	}
}
/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make a half-hearted attempt to invalidate page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
		      int op, int used, int want, int retain, int flushing,
		      unsigned *pflush_tid)
	__releases(cap->ci->vfs_inode->i_lock)
{
	struct ceph_inode_info *ci = cap->ci;
	struct inode *inode = &ci->vfs_inode;
	u64 cap_id = cap->cap_id;
	int held, revoking, dropping, keep;
	u64 seq, issue_seq, mseq, time_warp_seq, follows;
	u64 size, max_size;
	struct timespec mtime, atime;
	int wake = 0;
	mode_t mode;
	uid_t uid;
	gid_t gid;
	struct ceph_mds_session *session;
	u64 xattr_version = 0;
	int delayed = 0;
	u64 flush_tid = 0;
	int i;
	int ret;

	held = cap->issued | cap->implemented;
	revoking = cap->implemented & ~cap->issued;
	retain &= ~revoking;
	dropping = cap->issued & ~retain;

	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
	     inode, cap, cap->session,
	     ceph_cap_string(held), ceph_cap_string(held & retain),
	     ceph_cap_string(revoking));
	BUG_ON((retain & CEPH_CAP_PIN) == 0);

	session = cap->session;

	/* don't release wanted unless we've waited a bit. */
	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
	    time_before(jiffies, ci->i_hold_caps_min)) {
		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->issued & retain),
		     ceph_cap_string(cap->mds_wanted),
		     ceph_cap_string(want));
		want |= cap->mds_wanted;
		retain |= cap->issued;
		delayed = 1;
	}
	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);

	cap->issued &= retain;  /* drop bits we don't want */
	if (cap->implemented & ~cap->issued) {
		/*
		 * Wake up any waiters on wanted -> needed transition.
		 * This is due to the weird transition from buffered
		 * to sync IO... we need to flush dirty pages _before_
		 * allowing sync writes to avoid reordering.
		 */
		wake = 1;
	}
	cap->implemented &= cap->issued | used;
	cap->mds_wanted = want;

	if (flushing) {
		/*
		 * assign a tid for flush operations so we can avoid
		 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
		 * clean type races.  track latest tid for every bit
		 * so we can handle flush AxFw, flush Fw, and have the
		 * first ack clean Ax.
		 */
		flush_tid = ++ci->i_cap_flush_last_tid;
		if (pflush_tid)
			*pflush_tid = flush_tid;
		dout(" cap_flush_tid %d\n", (int)flush_tid);
		for (i = 0; i < CEPH_CAP_BITS; i++)
			if (flushing & (1 << i))
				ci->i_cap_flush_tid[i] = flush_tid;
	}
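	/*
	 * Worked example of the per-bit tids above (illustrative): flush
	 * #1 covers Ax|Fw and gets tid 5; Fw is then re-dirtied and
	 * flushed again as #2 with tid 6.  Now i_cap_flush_tid[Ax] == 5
	 * and i_cap_flush_tid[Fw] == 6, so the ack for tid 5 may mark
	 * Ax clean but not Fw, which stays flushing until the tid 6 ack.
	 */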
	keep = cap->implemented;
	seq = cap->seq;
	issue_seq = cap->issue_seq;
	mseq = cap->mseq;
	size = inode->i_size;
	ci->i_reported_size = size;
	max_size = ci->i_wanted_max_size;
	ci->i_requested_max_size = max_size;
	mtime = inode->i_mtime;
	atime = inode->i_atime;
	time_warp_seq = ci->i_time_warp_seq;
	follows = ci->i_snap_realm->cached_context->seq;
	uid = inode->i_uid;
	gid = inode->i_gid;
	mode = inode->i_mode;

	if (dropping & CEPH_CAP_XATTR_EXCL) {
		__ceph_build_xattrs_blob(ci);
		xattr_version = ci->i_xattrs.version + 1;
	}

	spin_unlock(&inode->i_lock);

	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
		op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
		size, max_size, &mtime, &atime, time_warp_seq,
		uid, gid, mode,
		xattr_version,
		(flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL,
		follows);
	if (ret < 0) {
		dout("error sending cap msg, must requeue %p\n", inode);
		delayed = 1;
	}

	if (wake)
		wake_up(&ci->i_cap_wq);

	return delayed;
}
/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Called under i_lock.  Takes s_mutex as needed.
 */
void __ceph_flush_snaps(struct ceph_inode_info *ci,
			struct ceph_mds_session **psession)
{
	struct inode *inode = &ci->vfs_inode;
	int mds;
	struct ceph_cap_snap *capsnap;
	u32 mseq;
	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
						    session->s_mutex */
	u64 next_follows = 0;  /* keep track of how far we've gotten through the
				  i_cap_snaps list, and skip these entries
				  next time around to avoid an infinite loop */

	if (psession)
		session = *psession;

	dout("__flush_snaps %p\n", inode);
retry:
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		/* avoid an infinite loop after retry */
		if (capsnap->follows < next_follows)
			continue;
		/*
		 * we need to wait for sync writes to complete and for dirty
		 * pages to be written out.
		 */
		if (capsnap->dirty_pages || capsnap->writing)
			continue;

		/*
		 * if cap writeback already occurred, we should have dropped
		 * the capsnap in ceph_put_wrbuffer_cap_refs.
		 */
		BUG_ON(capsnap->dirty == 0);

		/* pick mds, take s_mutex */
		mds = __ceph_get_cap_mds(ci, &mseq);
		if (session && session->s_mds != mds) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			session = NULL;
		}
		if (!session) {
			spin_unlock(&inode->i_lock);
			mutex_lock(&mdsc->mutex);
			session = __ceph_lookup_mds_session(mdsc, mds);
			mutex_unlock(&mdsc->mutex);
			if (session) {
				dout("inverting session/ino locks on %p\n",
				     session);
				mutex_lock(&session->s_mutex);
			}
			/*
			 * if session == NULL, we raced against a cap
			 * deletion.  retry, and we'll get a better
			 * @mds value next time.
			 */
			spin_lock(&inode->i_lock);
			goto retry;
		}

		capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
		atomic_inc(&capsnap->nref);
		if (!list_empty(&capsnap->flushing_item))
			list_del_init(&capsnap->flushing_item);
		list_add_tail(&capsnap->flushing_item,
			      &session->s_cap_snaps_flushing);
		spin_unlock(&inode->i_lock);

		dout("flush_snaps %p cap_snap %p follows %lld size %llu\n",
		     inode, capsnap, next_follows, capsnap->size);
		send_cap_msg(session, ceph_vino(inode).ino, 0,
			     CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
			     capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
			     capsnap->size, 0,
			     &capsnap->mtime, &capsnap->atime,
			     capsnap->time_warp_seq,
			     capsnap->uid, capsnap->gid, capsnap->mode,
			     0, NULL,
			     capsnap->follows);

		next_follows = capsnap->follows + 1;
		ceph_put_cap_snap(capsnap);

		spin_lock(&inode->i_lock);
		goto retry;
	}

	/* we flushed them all; remove this inode from the queue */
	spin_lock(&mdsc->snap_flush_lock);
	list_del_init(&ci->i_snap_flush_item);
	spin_unlock(&mdsc->snap_flush_lock);

	if (psession)
		*psession = session;
	else if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
}

static void ceph_flush_snaps(struct ceph_inode_info *ci)
{
	struct inode *inode = &ci->vfs_inode;

	spin_lock(&inode->i_lock);
	__ceph_flush_snaps(ci, NULL);
	spin_unlock(&inode->i_lock);
}
/*
 * Mark caps dirty.  If inode is newly dirty, add to the global dirty
 * list.
 */
void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
{
	struct ceph_mds_client *mdsc =
		&ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	struct inode *inode = &ci->vfs_inode;
	int was = ci->i_dirty_caps;
	int dirty = 0;

	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
	     ceph_cap_string(mask), ceph_cap_string(was),
	     ceph_cap_string(was | mask));
	ci->i_dirty_caps |= mask;
	if (was == 0) {
		dout(" inode %p now dirty\n", &ci->vfs_inode);
		BUG_ON(!list_empty(&ci->i_dirty_item));
		spin_lock(&mdsc->cap_dirty_lock);
		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (ci->i_flushing_caps == 0) {
			igrab(inode);
			dirty |= I_DIRTY_SYNC;
		}
	} else
		BUG_ON(list_empty(&ci->i_dirty_item));
	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
	    (mask & CEPH_CAP_FILE_BUFFER))
		dirty |= I_DIRTY_DATASYNC;
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	__cap_delay_requeue(mdsc, ci);
}

/*
 * Add dirty inode to the flushing list.  Assign a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_lock.
 */
static int __mark_caps_flushing(struct inode *inode,
				struct ceph_mds_session *session)
{
	struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int flushing;

	BUG_ON(ci->i_dirty_caps == 0);
	BUG_ON(list_empty(&ci->i_dirty_item));

	flushing = ci->i_dirty_caps;
	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
	     ceph_cap_string(flushing),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps | flushing));
	ci->i_flushing_caps |= flushing;
	ci->i_dirty_caps = 0;
	dout(" inode %p now !dirty\n", inode);

	spin_lock(&mdsc->cap_dirty_lock);
	list_del_init(&ci->i_dirty_item);

	ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
	if (list_empty(&ci->i_flushing_item)) {
		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		mdsc->num_cap_flushing++;
		dout(" inode %p now flushing seq %lld\n", inode,
		     ci->i_cap_flush_seq);
	} else {
		list_move_tail(&ci->i_flushing_item,
			       &session->s_cap_flushing);
		dout(" inode %p now flushing (more) seq %lld\n", inode,
		     ci->i_cap_flush_seq);
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	return flushing;
}
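/*
 * State sketch (illustrative summary of the two functions above): caps
 * move clean -> dirty (tracked in i_dirty_caps, inode on
 * mdsc->cap_dirty) via __ceph_mark_dirty_caps(), then dirty ->
 * flushing (i_flushing_caps, inode on the session's s_cap_flushing
 * list) via __mark_caps_flushing() when we actually send the FLUSH,
 * and finally back to clean when the MDS acks the flush.
 */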
/*
 * try to invalidate mapping pages without blocking.
 */
static int mapping_is_empty(struct address_space *mapping)
{
	struct page *page = find_get_page(mapping, 0);

	if (page) {
		put_page(page);
		return 0;
	}

	return 1;
}

static int try_nonblocking_invalidate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 invalidating_gen = ci->i_rdcache_gen;

	spin_unlock(&inode->i_lock);
	invalidate_mapping_pages(&inode->i_data, 0, -1);
	spin_lock(&inode->i_lock);

	if (mapping_is_empty(&inode->i_data) &&
	    invalidating_gen == ci->i_rdcache_gen) {
		/* success. */
		dout("try_nonblocking_invalidate %p success\n", inode);
		ci->i_rdcache_gen = 0;
		ci->i_rdcache_revoking = 0;
		return 0;
	}
	dout("try_nonblocking_invalidate %p failed\n", inode);
	return -1;
}
/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
 *    cap release further.
 *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
 *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
		     struct ceph_mds_session *session)
	__releases(session->s_mutex)
{
	struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	int file_wanted, used;
	int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
	int issued, implemented, want, retain, revoking, flushing = 0;
	int mds = -1;   /* keep track of how far we've gone through i_caps list
			   to avoid an infinite loop on retry */
	struct rb_node *p;
	int tried_invalidate = 0;
	int delayed = 0, sent = 0, force_requeue = 0, num;
	int queue_invalidate = 0;
	int is_delayed = flags & CHECK_CAPS_NODELAY;

	/* if we are unmounting, flush any unused caps immediately. */
	if (mdsc->stopping)
		is_delayed = 1;

	spin_lock(&inode->i_lock);

	if (ci->i_ceph_flags & CEPH_I_FLUSH)
		flags |= CHECK_CAPS_FLUSH;

	/* flush snaps first time around only */
	if (!list_empty(&ci->i_cap_snaps))
		__ceph_flush_snaps(ci, &session);
	goto retry_locked;
retry:
	spin_lock(&inode->i_lock);
retry_locked:
	file_wanted = __ceph_caps_file_wanted(ci);
	used = __ceph_caps_used(ci);
	want = file_wanted | used;
	issued = __ceph_caps_issued(ci, &implemented);
	revoking = implemented & ~issued;

	retain = want | CEPH_CAP_PIN;
	if (!mdsc->stopping && inode->i_nlink > 0) {
		if (want) {
			retain |= CEPH_CAP_ANY;       /* be greedy */
		} else {
			retain |= CEPH_CAP_ANY_SHARED;
			/*
			 * keep RD only if we didn't have the file open RW,
			 * because then the mds would revoke it anyway to
			 * journal max_size=0.
			 */
			if (ci->i_max_size == 0)
				retain |= CEPH_CAP_ANY_RD;
		}
	}

	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
	     " issued %s revoking %s retain %s %s%s%s\n", inode,
	     ceph_cap_string(file_wanted),
	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(issued), ceph_cap_string(revoking),
	     ceph_cap_string(retain),
	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

	/*
	 * If we no longer need to hold onto our old caps, and we may
	 * have cached pages, but don't want them, then try to invalidate.
	 * If we fail, it's because pages are locked.... try again later.
	 */
	if ((!is_delayed || mdsc->stopping) &&
	    ci->i_wrbuffer_ref == 0 &&               /* no dirty pages... */
	    ci->i_rdcache_gen &&                     /* may have cached pages */
	    (file_wanted == 0 ||                     /* no open files */
	     (revoking & CEPH_CAP_FILE_CACHE)) &&    /* or revoking cache */
	    !tried_invalidate) {
		dout("check_caps trying to invalidate on %p\n", inode);
		if (try_nonblocking_invalidate(inode) < 0) {
			if (revoking & CEPH_CAP_FILE_CACHE) {
				dout("check_caps queuing invalidate\n");
				queue_invalidate = 1;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			} else {
				dout("check_caps failed to invalidate pages\n");
				/* we failed to invalidate pages.  check these
				   caps again later. */
				force_requeue = 1;
				__cap_set_timeouts(mdsc, ci);
			}
		}
		tried_invalidate = 1;
		goto retry_locked;
	}

	num = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		num++;

		/* avoid looping forever */
		if (mds >= cap->mds ||
		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
			continue;

		/* NOTE: no side-effects allowed, until we take s_mutex */

		revoking = cap->implemented & ~cap->issued;
		if (revoking)
			dout(" mds%d revoking %s\n", cap->mds,
			     ceph_cap_string(revoking));

		if (cap == ci->i_auth_cap &&
		    (cap->issued & CEPH_CAP_FILE_WR)) {
			/* request larger max_size from MDS? */
			if (ci->i_wanted_max_size > ci->i_max_size &&
			    ci->i_wanted_max_size > ci->i_requested_max_size) {
				dout("requesting new max_size\n");
				goto ack;
			}

			/* approaching file_max? */
			if ((inode->i_size << 1) >= ci->i_max_size &&
			    (ci->i_reported_size << 1) < ci->i_max_size) {
				dout("i_size approaching max_size\n");
				goto ack;
			}
		}
		/* flush anything dirty? */
		if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
		    ci->i_dirty_caps) {
			dout("flushing dirty caps\n");
			goto ack;
		}

		/* completed revocation? going down and there are no caps? */
		if (revoking && (revoking & used) == 0) {
			dout("completed revocation of %s\n",
			     ceph_cap_string(cap->implemented & ~cap->issued));
			goto ack;
		}

		/* want more caps from mds? */
		if (want & ~(cap->mds_wanted | cap->issued))
			goto ack;

		/* things we might delay */
		if ((cap->issued & ~retain) == 0 &&
		    cap->mds_wanted == want)
			continue;     /* nope, all good */

		if (is_delayed)
			goto ack;

		/* delay? */
		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max)) {
			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(cap->issued & retain),
			     ceph_cap_string(cap->mds_wanted),
			     ceph_cap_string(want));
			delayed++;
			continue;
		}

ack:
		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
			dout(" skipping %p I_NOFLUSH set\n", inode);
			continue;
		}

		if (session && session != cap->session) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			session = NULL;
		}
		if (!session) {
			session = cap->session;
			if (mutex_trylock(&session->s_mutex) == 0) {
				dout("inverting session/ino locks on %p\n",
				     session);
				spin_unlock(&inode->i_lock);
				if (took_snap_rwsem) {
					up_read(&mdsc->snap_rwsem);
					took_snap_rwsem = 0;
				}
				mutex_lock(&session->s_mutex);
				goto retry;
			}
		}
		/* take snap_rwsem after session mutex */
		if (!took_snap_rwsem) {
			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
				dout("inverting snap/in locks on %p\n",
				     inode);
				spin_unlock(&inode->i_lock);
				down_read(&mdsc->snap_rwsem);
				took_snap_rwsem = 1;
				goto retry;
			}
			took_snap_rwsem = 1;
		}

		if (cap == ci->i_auth_cap && ci->i_dirty_caps)
			flushing = __mark_caps_flushing(inode, session);

		mds = cap->mds;  /* remember mds, so we don't repeat */
		sent++;

		/* __send_cap drops i_lock */
		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
				      retain, flushing, NULL);
		goto retry; /* retake i_lock and restart our cap scan. */
	}

	/*
	 * Reschedule delayed caps release if we delayed anything,
	 * otherwise cancel.
	 */
	if (delayed && is_delayed)
		force_requeue = 1;   /* __send_cap delayed release; requeue */
	if (!delayed && !is_delayed)
		__cap_delay_cancel(mdsc, ci);
	else if (!is_delayed || force_requeue)
		__cap_delay_requeue(mdsc, ci);

	spin_unlock(&inode->i_lock);

	if (queue_invalidate)
		ceph_queue_invalidate(inode);

	if (session)
		mutex_unlock(&session->s_mutex);
	if (took_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
}
/*
 * Try to flush dirty caps back to the auth mds.
 */
static int try_flush_caps(struct inode *inode,
			  struct ceph_mds_session *session,
			  unsigned *flush_tid)
{
	struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int unlock_session = session ? 0 : 1;
	int flushing = 0;

retry:
	spin_lock(&inode->i_lock);
	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
		goto out;
	}
	if (ci->i_dirty_caps && ci->i_auth_cap) {
		struct ceph_cap *cap = ci->i_auth_cap;
		int used = __ceph_caps_used(ci);
		int want = __ceph_caps_wanted(ci);
		int delayed;

		if (!session) {
			spin_unlock(&inode->i_lock);
			session = cap->session;
			mutex_lock(&session->s_mutex);
			goto retry;
		}
		BUG_ON(session != cap->session);
		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
			goto out;

		flushing = __mark_caps_flushing(inode, session);

		/* __send_cap drops i_lock */
		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
				     cap->issued | cap->implemented, flushing,
				     flush_tid);
		if (!delayed)
			goto out_unlocked;

		spin_lock(&inode->i_lock);
		__cap_delay_requeue(mdsc, ci);
	}
out:
	spin_unlock(&inode->i_lock);
out_unlocked:
	if (session && unlock_session)
		mutex_unlock(&session->s_mutex);
	return flushing;
}

/*
 * Return true if we've flushed caps through the given flush_tid.
 */
static int caps_are_flushed(struct inode *inode, unsigned tid)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int dirty, i, ret = 1;

	spin_lock(&inode->i_lock);
	dirty = __ceph_caps_dirty(ci);
	for (i = 0; i < CEPH_CAP_BITS; i++)
		if ((ci->i_flushing_caps & (1 << i)) &&
		    ci->i_cap_flush_tid[i] <= tid) {
			/* still flushing this bit */
			ret = 0;
			break;
		}
	spin_unlock(&inode->i_lock);
	return ret;
}

/*
 * Wait on any unsafe replies for the given inode.  First wait on the
 * newest request, and make that the upper bound.  Then, if there are
 * more requests, keep waiting on the oldest as long as it is still older
 * than the original request.
 */
static void sync_write_wait(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_writes;
	struct ceph_osd_request *req;
	u64 last_tid;

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	/* set upper bound as _last_ entry in chain */
	req = list_entry(head->prev, struct ceph_osd_request,
			 r_unsafe_item);
	last_tid = req->r_tid;

	do {
		ceph_osdc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("sync_write_wait on tid %llu (until %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		spin_lock(&ci->i_unsafe_lock);
		ceph_osdc_put_request(req);

		/*
		 * from here on look at first entry in chain, since we
		 * only want to wait for anything older than last_tid
		 */
		if (list_empty(head))
			break;
		req = list_entry(head->next, struct ceph_osd_request,
				 r_unsafe_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
}
int ceph_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned flush_tid;
	int ret;
	int dirty;

	dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
	sync_write_wait(inode);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret < 0)
		return ret;

	dirty = try_flush_caps(inode, NULL, &flush_tid);
	dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));

	/*
	 * only wait on non-file metadata writeback (the mds
	 * can recover size and mtime, so we don't need to
	 * wait for that)
	 */
	if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
		dout("fsync waiting for flush_tid %u\n", flush_tid);
		ret = wait_event_interruptible(ci->i_cap_wq,
					caps_are_flushed(inode, flush_tid));
	}

	dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
	return ret;
}

/*
 * Flush any dirty caps back to the mds.  If we aren't asked to wait,
 * queue inode for flush but don't do so immediately, because we can
 * get by with fewer MDS messages if we wait for data writeback to
 * complete first.
 */
int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned flush_tid;
	int err = 0;
	int dirty;
	int wait = wbc->sync_mode == WB_SYNC_ALL;

	dout("write_inode %p wait=%d\n", inode, wait);
	if (wait) {
		dirty = try_flush_caps(inode, NULL, &flush_tid);
		if (dirty)
			err = wait_event_interruptible(ci->i_cap_wq,
					caps_are_flushed(inode, flush_tid));
	} else {
		struct ceph_mds_client *mdsc =
			&ceph_sb_to_client(inode->i_sb)->mdsc;

		spin_lock(&inode->i_lock);
		if (__ceph_caps_dirty(ci))
			__cap_delay_requeue_front(mdsc, ci);
		spin_unlock(&inode->i_lock);
	}
	return err;
}
/*
 * After a recovering MDS goes active, we need to resend any caps
 * we were flushing.
 *
 * Caller holds session->s_mutex.
 */
static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_cap_snap *capsnap;

	dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
	list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
			    flushing_item) {
		struct ceph_inode_info *ci = capsnap->ci;
		struct inode *inode = &ci->vfs_inode;
		struct ceph_cap *cap;

		spin_lock(&inode->i_lock);
		cap = ci->i_auth_cap;
		if (cap && cap->session == session) {
			dout("kick_flushing_caps %p cap %p capsnap %p\n",
			     inode, cap, capsnap);
			__ceph_flush_snaps(ci, &session);
		} else {
			pr_err("%p auth cap %p not mds%d ???\n", inode,
			       cap, session->s_mds);
		}
		spin_unlock(&inode->i_lock);
	}
}

void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci;

	kick_flushing_capsnaps(mdsc, session);

	dout("kick_flushing_caps mds%d\n", session->s_mds);
	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
		struct inode *inode = &ci->vfs_inode;
		struct ceph_cap *cap;
		int delayed = 0;

		spin_lock(&inode->i_lock);
		cap = ci->i_auth_cap;
		if (cap && cap->session == session) {
			dout("kick_flushing_caps %p cap %p %s\n", inode,
			     cap, ceph_cap_string(ci->i_flushing_caps));
			delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
					     __ceph_caps_used(ci),
					     __ceph_caps_wanted(ci),
					     cap->issued | cap->implemented,
					     ci->i_flushing_caps, NULL);
			if (delayed) {
				spin_lock(&inode->i_lock);
				__cap_delay_requeue(mdsc, ci);
				spin_unlock(&inode->i_lock);
			}
		} else {
			pr_err("%p auth cap %p not mds%d ???\n", inode,
			       cap, session->s_mds);
			spin_unlock(&inode->i_lock);
		}
	}
}
/*
 * Take references to capabilities we hold, so that we don't release
 * them to the MDS prematurely.
 *
 * Protected by i_lock.
 */
static void __take_cap_refs(struct ceph_inode_info *ci, int got)
{
	if (got & CEPH_CAP_PIN)
		ci->i_pin_ref++;
	if (got & CEPH_CAP_FILE_RD)
		ci->i_rd_ref++;
	if (got & CEPH_CAP_FILE_CACHE)
		ci->i_rdcache_ref++;
	if (got & CEPH_CAP_FILE_WR)
		ci->i_wr_ref++;
	if (got & CEPH_CAP_FILE_BUFFER) {
		if (ci->i_wrbuffer_ref == 0)
			igrab(&ci->vfs_inode);
		ci->i_wrbuffer_ref++;
		dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
		     &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
	}
}
/*
 * Try to grab cap references.  Specify those refs we @want, and the
 * minimal set we @need.  Also include the larger offset we are writing
 * to (when applicable), and check against max_size here as well.
 * Note that caller is responsible for ensuring max_size increases are
 * requested from the MDS.
 */
static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
			    int *got, loff_t endoff, int *check_max, int *err)
{
	struct inode *inode = &ci->vfs_inode;
	int ret = 0;
	int have, implemented;
	int file_wanted;

	dout("get_cap_refs %p need %s want %s\n", inode,
	     ceph_cap_string(need), ceph_cap_string(want));
	spin_lock(&inode->i_lock);

	/* make sure file is actually open */
	file_wanted = __ceph_caps_file_wanted(ci);
	if ((file_wanted & need) == 0) {
		dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
		     ceph_cap_string(need), ceph_cap_string(file_wanted));
		*err = -EBADF;
		ret = 1;
		goto out;
	}

	if (need & CEPH_CAP_FILE_WR) {
		if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
			dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
			     inode, endoff, ci->i_max_size);
			if (endoff > ci->i_wanted_max_size) {
				*check_max = 1;
				ret = 1;
			}
			goto out;
		}
		/*
		 * If a sync write is in progress, we must wait, so that we
		 * can get a final snapshot value for size+mtime.
		 */
		if (__ceph_have_pending_cap_snap(ci)) {
			dout("get_cap_refs %p cap_snap_pending\n", inode);
			goto out;
		}
	}
	have = __ceph_caps_issued(ci, &implemented);

	/*
	 * disallow writes while a truncate is pending
	 */
	if (ci->i_truncate_pending)
		have &= ~CEPH_CAP_FILE_WR;

	if ((have & need) == need) {
		/*
		 * Look at (implemented & ~have & not) so that we keep waiting
		 * on transition from wanted -> needed caps.  This is needed
		 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
		 * going before a prior buffered writeback happens.
		 */
		int not = want & ~(have & need);
		int revoking = implemented & ~have;

		dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
		     inode, ceph_cap_string(have), ceph_cap_string(not),
		     ceph_cap_string(revoking));
		if ((revoking & not) == 0) {
			*got = need | (have & want);
			__take_cap_refs(ci, *got);
			ret = 1;
		}
	} else {
		dout("get_cap_refs %p have %s needed %s\n", inode,
		     ceph_cap_string(have), ceph_cap_string(need));
	}
out:
	spin_unlock(&inode->i_lock);
	dout("get_cap_refs %p ret %d got %s\n", inode,
	     ret, ceph_cap_string(*got));
	return ret;
}
/*
 * Check the offset we are writing up to against our current
 * max_size.  If necessary, tell the MDS we want to write to
 * a larger offset.
 */
static void check_max_size(struct inode *inode, loff_t endoff)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int check = 0;

	/* do we need to explicitly request a larger max_size? */
	spin_lock(&inode->i_lock);
	if ((endoff >= ci->i_max_size ||
	     endoff > (inode->i_size << 1)) &&
	    endoff > ci->i_wanted_max_size) {
		dout("write %p at large endoff %llu, req max_size\n",
		     inode, endoff);
		ci->i_wanted_max_size = endoff;
		check = 1;
	}
	spin_unlock(&inode->i_lock);
	if (check)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
}

/*
 * Wait for caps, and take cap references.  If we can't get a WR cap
 * due to a small max_size, make sure we check_max_size (and possibly
 * ask the mds) so we don't get hung up indefinitely.
 */
int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
		  loff_t endoff)
{
	int check_max, ret, err;

retry:
	if (endoff > 0)
		check_max_size(&ci->vfs_inode, endoff);
	check_max = 0;
	err = 0;
	ret = wait_event_interruptible(ci->i_cap_wq,
				       try_get_cap_refs(ci, need, want,
							got, endoff,
							&check_max, &err));
	if (err)
		ret = err;
	if (check_max)
		goto retry;
	return ret;
}
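/*
 * Caller sketch (illustrative, not from the original source): a write
 * path would typically pair this with ceph_put_cap_refs():
 *
 *	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
 *			    &got, endoff);
 *	if (err < 0)
 *		return err;
 *	... do the write, buffered if (got & CEPH_CAP_FILE_BUFFER) ...
 *	ceph_put_cap_refs(ci, got);
 */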
/*
 * Take cap refs.  Caller must already know we hold at least one ref
 * on the caps in question or we don't know this is safe.
 */
void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
{
	spin_lock(&ci->vfs_inode.i_lock);
	__take_cap_refs(ci, caps);
	spin_unlock(&ci->vfs_inode.i_lock);
}

/*
 * Release cap refs.
 *
 * If we released the last ref on any given cap, call ceph_check_caps
 * to release (or schedule a release).
 *
 * If we are releasing a WR cap (from a sync write), finalize any affected
 * cap_snap, and wake up any waiters.
 */
void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0, put = 0, flushsnaps = 0, wake = 0;
	struct ceph_cap_snap *capsnap;

	spin_lock(&inode->i_lock);
	if (had & CEPH_CAP_PIN)
		--ci->i_pin_ref;
	if (had & CEPH_CAP_FILE_RD)
		if (--ci->i_rd_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_CACHE)
		if (--ci->i_rdcache_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_BUFFER) {
		if (--ci->i_wrbuffer_ref == 0) {
			last++;
			put++;
		}
		dout("put_cap_refs %p wrbuffer %d -> %d (?)\n",
		     inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref);
	}
	if (had & CEPH_CAP_FILE_WR)
		if (--ci->i_wr_ref == 0) {
			last++;
			if (!list_empty(&ci->i_cap_snaps)) {
				capsnap = list_first_entry(&ci->i_cap_snaps,
						     struct ceph_cap_snap,
						     ci_item);
				if (capsnap->writing) {
					capsnap->writing = 0;
					flushsnaps =
						__ceph_finish_cap_snap(ci,
								       capsnap);
					wake = 1;
				}
			}
		}
	spin_unlock(&inode->i_lock);

	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
	     last ? " last" : "", put ? " put" : "");

	if (last && !flushsnaps)
		ceph_check_caps(ci, 0, NULL);
	else if (flushsnaps)
		ceph_flush_snaps(ci);
	if (wake)
		wake_up(&ci->i_cap_wq);
	if (put)
		iput(inode);
}
/*
 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
 * context.  Adjust per-snap dirty page accounting as appropriate.
 * Once all dirty data for a cap_snap is flushed, flush snapped file
 * metadata back to the MDS.  If we dropped the last ref, call
 * ceph_check_caps.
 */
void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
				struct ceph_snap_context *snapc)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0;
	int complete_capsnap = 0;
	int drop_capsnap = 0;
	int found = 0;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&inode->i_lock);
	ci->i_wrbuffer_ref -= nr;
	last = !ci->i_wrbuffer_ref;

	if (ci->i_head_snapc == snapc) {
		ci->i_wrbuffer_ref_head -= nr;
		if (!ci->i_wrbuffer_ref_head) {
			ceph_put_snap_context(ci->i_head_snapc);
			ci->i_head_snapc = NULL;
		}
		dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
		     inode,
		     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
		     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
		     last ? " LAST" : "");
	} else {
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				found = 1;
				break;
			}
		}
		BUG_ON(!found);
		capsnap->dirty_pages -= nr;
		if (capsnap->dirty_pages == 0) {
			complete_capsnap = 1;
			if (capsnap->dirty == 0)
				/* cap writeback completed before we created
				 * the cap_snap; no FLUSHSNAP is needed */
				drop_capsnap = 1;
		}
		dout("put_wrbuffer_cap_refs on %p cap_snap %p "
		     " snap %lld %d/%d -> %d/%d %s%s%s\n",
		     inode, capsnap, capsnap->context->seq,
		     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
		     ci->i_wrbuffer_ref, capsnap->dirty_pages,
		     last ? " (wrbuffer last)" : "",
		     complete_capsnap ? " (complete capsnap)" : "",
		     drop_capsnap ? " (drop capsnap)" : "");
		if (drop_capsnap) {
			ceph_put_snap_context(capsnap->context);
			list_del(&capsnap->ci_item);
			list_del(&capsnap->flushing_item);
			ceph_put_cap_snap(capsnap);
		}
	}

	spin_unlock(&inode->i_lock);

	if (last) {
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		iput(inode);
	} else if (complete_capsnap) {
		ceph_flush_snaps(ci);
		wake_up(&ci->i_cap_wq);
	}
	if (drop_capsnap)
		iput(inode);
}

/*
 * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
 * actually be a revocation if it specifies a smaller cap set.)
 *
 * caller holds s_mutex and i_lock, we drop both.
 *
 * check_caps disposition (this function returns void; these values
 * drive the calls at the end of the function):
 *  0 - ok, no further action
 *  1 - check_caps on auth cap only (writeback)
 *  2 - check_caps (ack revoke)
 */
static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
			     struct ceph_mds_session *session,
			     struct ceph_cap *cap,
			     struct ceph_buffer *xattr_buf)
	__releases(inode->i_lock)
	__releases(session->s_mutex)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	int seq = le32_to_cpu(grant->seq);
	int newcaps = le32_to_cpu(grant->caps);
	int issued, implemented, used, wanted, dirty;
	u64 size = le64_to_cpu(grant->size);
	u64 max_size = le64_to_cpu(grant->max_size);
	struct timespec mtime, atime, ctime;
	int check_caps = 0;
	int wake = 0;
	int writeback = 0;
	int revoked_rdcache = 0;
	int queue_invalidate = 0;

	dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
	     inode, cap, mds, seq, ceph_cap_string(newcaps));
	dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
	     inode->i_size);

	/*
	 * If CACHE is being revoked, and we have no dirty buffers,
	 * try to invalidate (once).  (If there are dirty buffers, we
	 * will invalidate _after_ writeback.)
	 */
	if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
	    !ci->i_wrbuffer_ref) {
		if (try_nonblocking_invalidate(inode) == 0) {
			revoked_rdcache = 1;
		} else {
			/* there were locked pages.. invalidate later
			   in a separate thread. */
			if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
				queue_invalidate = 1;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			}
		}
	}

	/* side effects now are allowed */

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	cap->cap_gen = session->s_cap_gen;

	__check_cap_issue(ci, cap, newcaps);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(grant->mode);
		inode->i_uid = le32_to_cpu(grant->uid);
		inode->i_gid = le32_to_cpu(grant->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		inode->i_nlink = le32_to_cpu(grant->nlink);

	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
		int len = le32_to_cpu(grant->xattr_len);
		u64 version = le64_to_cpu(grant->xattr_version);

		if (version > ci->i_xattrs.version) {
			dout(" got new xattrs v%llu on %p len %d\n",
			     version, inode, len);
			if (ci->i_xattrs.blob)
				ceph_buffer_put(ci->i_xattrs.blob);
			ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
			ci->i_xattrs.version = version;
		}
	}

	/* size/ctime/mtime/atime? */
	ceph_fill_file_size(inode, issued,
			    le32_to_cpu(grant->truncate_seq),
			    le64_to_cpu(grant->truncate_size), size);
	ceph_decode_timespec(&mtime, &grant->mtime);
	ceph_decode_timespec(&atime, &grant->atime);
	ceph_decode_timespec(&ctime, &grant->ctime);
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
			    &atime);

	/* max size increase? */
	if (max_size != ci->i_max_size) {
		dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
		ci->i_max_size = max_size;
		if (max_size >= ci->i_wanted_max_size) {
			ci->i_wanted_max_size = 0;  /* reset */
			ci->i_requested_max_size = 0;
		}
		wake = 1;
	}

	/* check cap bits */
	wanted = __ceph_caps_wanted(ci);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);
	dout(" my wanted = %s, used = %s, dirty %s\n",
	     ceph_cap_string(wanted),
	     ceph_cap_string(used),
	     ceph_cap_string(dirty));
	if (wanted != le32_to_cpu(grant->wanted)) {
		dout("mds wanted %s -> %s\n",
		     ceph_cap_string(le32_to_cpu(grant->wanted)),
		     ceph_cap_string(wanted));
		grant->wanted = cpu_to_le32(wanted);
	}

	cap->seq = seq;

	/* file layout may have changed */
	ci->i_layout = grant->layout;

	/* revocation, grant, or no-op? */
	if (cap->issued & ~newcaps) {
		dout("revocation: %s -> %s\n", ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps));
		if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER)
			writeback = 1; /* will delay ack */
		else if (dirty & ~newcaps)
			check_caps = 1;  /* initiate writeback in check_caps */
		else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 ||
			   revoked_rdcache)
			check_caps = 2;     /* send revoke ack in check_caps */
		cap->issued = newcaps;
		cap->implemented |= newcaps;
	} else if (cap->issued == newcaps) {
		dout("caps unchanged: %s -> %s\n",
		     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
	} else {
		dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps));
		cap->issued = newcaps;
		cap->implemented |= newcaps; /* add bits only, to
					      * avoid stepping on a
					      * pending revocation */
	}

	BUG_ON(cap->issued & ~cap->implemented);

	spin_unlock(&inode->i_lock);
	if (writeback)
		/*
		 * queue inode for writeback: we can't actually call
		 * filemap_write_and_wait, etc. from message handler
		 * context.
		 */
		ceph_queue_writeback(inode);
	if (queue_invalidate)
		ceph_queue_invalidate(inode);
	if (wake)
		wake_up(&ci->i_cap_wq);

	if (check_caps == 1)
		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
				session);
	else if (check_caps == 2)
		ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
	else
		mutex_unlock(&session->s_mutex);
}
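
/*
 * Worked example of the revocation ladder above: if we hold Fcb
 * (FILE_CACHE|FILE_BUFFER) with dirty buffers in use and the MDS
 * grants only Fc, then (used & ~newcaps) includes FILE_BUFFER, so
 * writeback = 1 and the ack is deferred until the dirty data reaches
 * the OSDs.  If nothing revoked is dirty or in use, check_caps = 2
 * and the revocation is acked immediately.
 */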

/*
 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
 * MDS has been safely committed.
 */
static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
				 struct ceph_mds_caps *m,
				 struct ceph_mds_session *session,
				 struct ceph_cap *cap)
	__releases(inode->i_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
	unsigned seq = le32_to_cpu(m->seq);
	int dirty = le32_to_cpu(m->dirty);
	int cleaned = 0;
	int drop = 0;
	int i;

	for (i = 0; i < CEPH_CAP_BITS; i++)
		if ((dirty & (1 << i)) &&
		    flush_tid == ci->i_cap_flush_tid[i])
			cleaned |= 1 << i;

	dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
	     " flushing %s -> %s\n",
	     inode, session->s_mds, seq, ceph_cap_string(dirty),
	     ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps & ~cleaned));

	if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
		goto out;

	ci->i_flushing_caps &= ~cleaned;

	spin_lock(&mdsc->cap_dirty_lock);
	if (ci->i_flushing_caps == 0) {
		list_del_init(&ci->i_flushing_item);
		if (!list_empty(&session->s_cap_flushing))
			dout(" mds%d still flushing cap on %p\n",
			     session->s_mds,
			     &list_entry(session->s_cap_flushing.next,
					 struct ceph_inode_info,
					 i_flushing_item)->vfs_inode);
		mdsc->num_cap_flushing--;
		wake_up(&mdsc->cap_flushing_wq);
		dout(" inode %p now !flushing\n", inode);

		if (ci->i_dirty_caps == 0) {
			dout(" inode %p now clean\n", inode);
			BUG_ON(!list_empty(&ci->i_dirty_item));
			drop = 1;
		} else {
			BUG_ON(list_empty(&ci->i_dirty_item));
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	wake_up(&ci->i_cap_wq);

out:
	spin_unlock(&inode->i_lock);
	if (drop)
		iput(inode);
}
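
/*
 * Example of the per-bit flush_tid check above: if Fw was flushed with
 * tid 5 and then flushed again with tid 6, i_cap_flush_tid[that bit]
 * is 6, so an ack carrying tid 5 does not mark the bit cleaned.  Only
 * the ack matching the most recent flush of each cap bit cleans it.
 */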

/*
 * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
 * throw away our cap_snap.
 *
 * Caller holds s_mutex.
 */
static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
				     struct ceph_mds_caps *m,
				     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 follows = le64_to_cpu(m->snap_follows);
	struct ceph_cap_snap *capsnap;
	int drop = 0;

	dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
	     inode, ci, session->s_mds, follows);

	spin_lock(&inode->i_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		if (capsnap->follows == follows) {
			if (capsnap->flush_tid != flush_tid) {
				dout(" cap_snap %p follows %lld tid %lld !="
				     " %lld\n", capsnap, follows,
				     flush_tid, capsnap->flush_tid);
				break;
			}
			WARN_ON(capsnap->dirty_pages || capsnap->writing);
			dout(" removing %p cap_snap %p follows %lld\n",
			     inode, capsnap, follows);
			ceph_put_snap_context(capsnap->context);
			list_del(&capsnap->ci_item);
			list_del(&capsnap->flushing_item);
			ceph_put_cap_snap(capsnap);
			drop = 1;
			break;
		} else {
			dout(" skipping cap_snap %p follows %lld\n",
			     capsnap, capsnap->follows);
		}
	}
	spin_unlock(&inode->i_lock);
	if (drop)
		iput(inode);
}

/*
 * Handle TRUNC from MDS, indicating file truncation.
 *
 * caller holds s_mutex.
 */
static void handle_cap_trunc(struct inode *inode,
			     struct ceph_mds_caps *trunc,
			     struct ceph_mds_session *session)
	__releases(inode->i_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	int seq = le32_to_cpu(trunc->seq);
	u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
	u64 truncate_size = le64_to_cpu(trunc->truncate_size);
	u64 size = le64_to_cpu(trunc->size);
	int implemented = 0;
	int dirty = __ceph_caps_dirty(ci);
	int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
	int queue_trunc = 0;

	issued |= implemented | dirty;

	dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
	     inode, mds, seq, truncate_size, truncate_seq);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  truncate_seq, truncate_size, size);
	spin_unlock(&inode->i_lock);

	if (queue_trunc)
		ceph_queue_vmtruncate(inode);
}
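
/*
 * Note: ceph_fill_file_size() uses truncate_seq to order racing
 * truncations, so a TRUNC carrying an older truncate_seq than one we
 * have already applied is ignored rather than resurrecting a stale
 * size.
 */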

/*
 * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
 * different one.  If we are the most recent migration we've seen (as
 * indicated by mseq), make note of the migrating cap bits for the
 * duration (until we see the corresponding IMPORT).
 *
 * caller holds s_mutex
 */
static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
			      struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	unsigned mseq = le32_to_cpu(ex->migrate_seq);
	struct ceph_cap *cap = NULL, *t;
	struct rb_node *p;
	int remember = 1;

	dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
	     inode, ci, mds, mseq);

	spin_lock(&inode->i_lock);

	/* make sure we haven't seen a higher mseq */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		t = rb_entry(p, struct ceph_cap, ci_node);
		if (ceph_seq_cmp(t->mseq, mseq) > 0) {
			dout(" higher mseq on cap from mds%d\n",
			     t->session->s_mds);
			remember = 0;
		}
		if (t->session->s_mds == mds)
			cap = t;
	}

	if (cap) {
		if (remember) {
			/* make note */
			ci->i_cap_exporting_mds = mds;
			ci->i_cap_exporting_mseq = mseq;
			ci->i_cap_exporting_issued = cap->issued;
		}
		__ceph_remove_cap(cap);
	}
	/* else, we already released it */

	spin_unlock(&inode->i_lock);
}
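
/*
 * Worked example: when mds0 migrates our cap to mds1, we may see
 * EXPORT(mseq N) from mds0 first.  The issued bits are parked in
 * i_cap_exporting_* and the mds0 cap is removed; when the matching
 * IMPORT with a newer mseq arrives from mds1 (handled below), the
 * parked state is cleared and a fresh cap is added for mds1.  An
 * EXPORT whose mseq is older than a migration we have already seen is
 * not remembered.
 */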

/*
 * Handle cap IMPORT.  If there are temp bits from an older EXPORT,
 * clean them up.
 *
 * caller holds s_mutex.
 */
static void handle_cap_import(struct ceph_mds_client *mdsc,
			      struct inode *inode, struct ceph_mds_caps *im,
			      struct ceph_mds_session *session,
			      void *snaptrace, int snaptrace_len)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	unsigned issued = le32_to_cpu(im->caps);
	unsigned wanted = le32_to_cpu(im->wanted);
	unsigned seq = le32_to_cpu(im->seq);
	unsigned mseq = le32_to_cpu(im->migrate_seq);
	u64 realmino = le64_to_cpu(im->realm);
	u64 cap_id = le64_to_cpu(im->cap_id);

	if (ci->i_cap_exporting_mds >= 0 &&
	    ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
		dout("handle_cap_import inode %p ci %p mds%d mseq %d"
		     " - cleared exporting from mds%d\n",
		     inode, ci, mds, mseq,
		     ci->i_cap_exporting_mds);
		ci->i_cap_exporting_issued = 0;
		ci->i_cap_exporting_mseq = 0;
		ci->i_cap_exporting_mds = -1;
	} else {
		dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
		     inode, ci, mds, mseq);
	}

	down_write(&mdsc->snap_rwsem);
	ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
			       false);
	downgrade_write(&mdsc->snap_rwsem);
	ceph_add_cap(inode, session, cap_id, -1,
		     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
		     NULL /* no caps context */);
	try_flush_caps(inode, session, NULL);
	up_read(&mdsc->snap_rwsem);
}
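
/*
 * Design note: snap_rwsem is taken for write only long enough to apply
 * the snap trace, then downgraded; ceph_add_cap() and the subsequent
 * flush only need the read side, so holding the write side across them
 * would needlessly serialize other snap-trace readers.
 */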

/*
 * Handle a caps message from the MDS.
 *
 * Identify the appropriate session, inode, and call the right handler
 * based on the cap op.
 */
void ceph_handle_caps(struct ceph_mds_session *session,
		      struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct super_block *sb = mdsc->client->sb;
	struct inode *inode;
	struct ceph_cap *cap;
	struct ceph_mds_caps *h;
	int mds = session->s_mds;
	int op;
	u32 seq;
	struct ceph_vino vino;
	u64 cap_id;
	u64 size, max_size;
	u64 tid;
	void *snaptrace;

	dout("handle_caps from mds%d\n", mds);

	/* decode */
	tid = le64_to_cpu(msg->hdr.tid);
	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	h = msg->front.iov_base;
	snaptrace = h + 1;
	op = le32_to_cpu(h->op);
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	cap_id = le64_to_cpu(h->cap_id);
	seq = le32_to_cpu(h->seq);
	size = le64_to_cpu(h->size);
	max_size = le64_to_cpu(h->max_size);

	mutex_lock(&session->s_mutex);
	session->s_seq++;
	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
	     (unsigned)seq);

	/* lookup ino */
	inode = ceph_find_inode(sb, vino);
	dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
	     vino.snap, inode);
	if (!inode) {
		dout(" i don't have ino %llx\n", vino.ino);
		goto done;
	}

	/* these will work even if we don't have a cap yet */
	switch (op) {
	case CEPH_CAP_OP_FLUSHSNAP_ACK:
		handle_cap_flushsnap_ack(inode, tid, h, session);
		goto done;

	case CEPH_CAP_OP_EXPORT:
		handle_cap_export(inode, h, session);
		goto done;

	case CEPH_CAP_OP_IMPORT:
		handle_cap_import(mdsc, inode, h, session,
				  snaptrace, le32_to_cpu(h->snap_trace_len));
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY,
				session);
		goto done_unlocked;
	}

	/* the rest require a cap */
	spin_lock(&inode->i_lock);
	cap = __get_cap_for_mds(ceph_inode(inode), mds);
	if (!cap) {
		dout("no cap on %p ino %llx.%llx from mds%d, releasing\n",
		     inode, ceph_ino(inode), ceph_snap(inode), mds);
		spin_unlock(&inode->i_lock);
		goto done;
	}

	/* note that each of these drops i_lock for us */
	switch (op) {
	case CEPH_CAP_OP_REVOKE:
	case CEPH_CAP_OP_GRANT:
		handle_cap_grant(inode, h, session, cap, msg->middle);
		goto done_unlocked;

	case CEPH_CAP_OP_FLUSH_ACK:
		handle_cap_flush_ack(inode, tid, h, session, cap);
		break;

	case CEPH_CAP_OP_TRUNC:
		handle_cap_trunc(inode, h, session);
		break;

	default:
		spin_unlock(&inode->i_lock);
		pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
		       ceph_cap_op_name(op));
	}

done:
	mutex_unlock(&session->s_mutex);
done_unlocked:
	if (inode)
		iput(inode);
	return;

bad:
	pr_err("ceph_handle_caps: corrupt message\n");
	return;
}
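
/*
 * Dispatch summary for ceph_handle_caps(): FLUSHSNAP_ACK, EXPORT, and
 * IMPORT are handled without looking up a ceph_cap (the cap may be
 * gone, or not yet instantiated); GRANT/REVOKE, FLUSH_ACK, and TRUNC
 * require the cap for this MDS, which is looked up under i_lock, and
 * each of those handlers drops i_lock on our behalf.
 */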

/*
 * Delayed work handler to process end of delayed cap release LRU list.
 */
void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	int flags = CHECK_CAPS_NODELAY;

	dout("check_delayed_caps\n");
	while (1) {
		spin_lock(&mdsc->cap_delay_lock);
		if (list_empty(&mdsc->cap_delay_list))
			break;
		ci = list_first_entry(&mdsc->cap_delay_list,
				      struct ceph_inode_info,
				      i_cap_delay_list);
		if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max))
			break;
		list_del_init(&ci->i_cap_delay_list);
		spin_unlock(&mdsc->cap_delay_lock);
		dout("check_delayed_caps on %p\n", &ci->vfs_inode);
		ceph_check_caps(ci, flags, NULL);
	}
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Flush all dirty caps to the mds
 */
void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci, *nci = NULL;
	struct inode *inode, *ninode = NULL;
	struct list_head *p, *n;

	dout("flush_dirty_caps\n");
	spin_lock(&mdsc->cap_dirty_lock);
	list_for_each_safe(p, n, &mdsc->cap_dirty) {
		if (nci) {
			ci = nci;
			inode = ninode;
			ci->i_ceph_flags &= ~CEPH_I_NOFLUSH;
			dout("flush_dirty_caps inode %p (was next inode)\n",
			     inode);
		} else {
			ci = list_entry(p, struct ceph_inode_info,
					i_dirty_item);
			inode = igrab(&ci->vfs_inode);
			BUG_ON(!inode);
			dout("flush_dirty_caps inode %p\n", inode);
		}
		if (n != &mdsc->cap_dirty) {
			nci = list_entry(n, struct ceph_inode_info,
					 i_dirty_item);
			ninode = igrab(&nci->vfs_inode);
			BUG_ON(!ninode);
			nci->i_ceph_flags |= CEPH_I_NOFLUSH;
			dout("flush_dirty_caps next inode %p, noflush\n",
			     ninode);
		} else {
			nci = NULL;
			ninode = NULL;
		}
		spin_unlock(&mdsc->cap_dirty_lock);
		if (inode) {
			ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
					NULL);
			iput(inode);
		}
		spin_lock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&mdsc->cap_dirty_lock);
}
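
/*
 * Design note on the loop above: cap_dirty_lock must be dropped around
 * ceph_check_caps(), so the current list position may be invalidated
 * while we work.  Pinning the *next* inode with igrab() and flagging
 * it CEPH_I_NOFLUSH keeps it on the list (and keeps others from
 * flushing it first) until we come back around, so the walk can resume
 * safely after re-taking the lock.
 */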

/*
 * Drop open file reference.  If we were the last open file,
 * we may need to release capabilities to the MDS (or schedule
 * their delayed release).
 */
void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0;

	spin_lock(&inode->i_lock);
	dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
	     ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
	BUG_ON(ci->i_nr_by_mode[fmode] == 0);
	if (--ci->i_nr_by_mode[fmode] == 0)
		last++;
	spin_unlock(&inode->i_lock);

	if (last && ci->i_vino.snap == CEPH_NOSNAP)
		ceph_check_caps(ci, 0, NULL);
}

/*
 * Helpers for embedding cap and dentry lease releases into mds
 * requests.
 *
 * @force is used by dentry_release (below) to force inclusion of a
 * record for the directory inode, even when there aren't any caps to
 * drop.
 */
int ceph_encode_inode_release(void **p, struct inode *inode,
			      int mds, int drop, int unless, int force)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	struct ceph_mds_request_release *rel = *p;
	int ret = 0;
	int used = 0;

	spin_lock(&inode->i_lock);
	used = __ceph_caps_used(ci);

	dout("encode_inode_release %p mds%d used %s drop %s unless %s\n", inode,
	     mds, ceph_cap_string(used), ceph_cap_string(drop),
	     ceph_cap_string(unless));

	/* only drop unused caps */
	drop &= ~used;

	cap = __get_cap_for_mds(ci, mds);
	if (cap && __cap_is_valid(cap)) {
		if (force ||
		    ((cap->issued & drop) &&
		     (cap->issued & unless) == 0)) {
			if ((cap->issued & drop) &&
			    (cap->issued & unless) == 0) {
				dout("encode_inode_release %p cap %p %s -> "
				     "%s\n", inode, cap,
				     ceph_cap_string(cap->issued),
				     ceph_cap_string(cap->issued & ~drop));
				cap->issued &= ~drop;
				cap->implemented &= ~drop;
				if (ci->i_ceph_flags & CEPH_I_NODELAY) {
					int wanted = __ceph_caps_wanted(ci);
					dout(" wanted %s -> %s (act %s)\n",
					     ceph_cap_string(cap->mds_wanted),
					     ceph_cap_string(cap->mds_wanted &
							     ~wanted),
					     ceph_cap_string(wanted));
					cap->mds_wanted &= wanted;
				}
			} else {
				dout("encode_inode_release %p cap %p %s"
				     " (force)\n", inode, cap,
				     ceph_cap_string(cap->issued));
			}

			rel->ino = cpu_to_le64(ceph_ino(inode));
			rel->cap_id = cpu_to_le64(cap->cap_id);
			rel->seq = cpu_to_le32(cap->seq);
			rel->issue_seq = cpu_to_le32(cap->issue_seq);
			rel->mseq = cpu_to_le32(cap->mseq);
			rel->caps = cpu_to_le32(cap->issued);
			rel->wanted = cpu_to_le32(cap->mds_wanted);
			rel->dname_len = 0;
			rel->dname_seq = 0;
			*p += sizeof(*rel);
			ret = 1;
		} else {
			dout("encode_inode_release %p cap %p %s\n",
			     inode, cap, ceph_cap_string(cap->issued));
		}
	}
	spin_unlock(&inode->i_lock);
	return ret;
}
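
/*
 * Illustrative sketch: a caller composing an MDS request embeds a
 * release record directly into the request buffer at *p.
 * example_encode_release() is hypothetical; only the
 * ceph_encode_inode_release() call reflects this file.
 */
#if 0
static void example_encode_release(void **p, struct inode *inode, int mds)
{
	/*
	 * Offer to drop LINK_SHARED unless LINK_EXCL is issued; on
	 * success, a release record was written and *p was advanced.
	 */
	ceph_encode_inode_release(p, inode, mds,
				  CEPH_CAP_LINK_SHARED,
				  CEPH_CAP_LINK_EXCL, 0);
}
#endif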

int ceph_encode_dentry_release(void **p, struct dentry *dentry,
			       int mds, int drop, int unless)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct ceph_mds_request_release *rel = *p;
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int force = 0;
	int ret;

	/*
	 * force a record for the directory caps if we have a dentry lease.
	 * this is racy (can't take i_lock and d_lock together), but it
	 * doesn't have to be perfect; the mds will revoke anything we don't
	 * drop.
	 */
	spin_lock(&dentry->d_lock);
	if (di->lease_session && di->lease_session->s_mds == mds)
		force = 1;
	spin_unlock(&dentry->d_lock);

	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);

	spin_lock(&dentry->d_lock);
	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
		dout("encode_dentry_release %p mds%d seq %d\n",
		     dentry, mds, (int)di->lease_seq);
		rel->dname_len = cpu_to_le32(dentry->d_name.len);
		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
		*p += dentry->d_name.len;
		rel->dname_seq = cpu_to_le32(di->lease_seq);
	}
	spin_unlock(&dentry->d_lock);
	return ret;
}