1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Code which implements an OCFS2 specific interface to our DLM.
8 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
26 #include <linux/types.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
30 #include <linux/kthread.h>
31 #include <linux/pagemap.h>
32 #include <linux/debugfs.h>
33 #include <linux/seq_file.h>
34 #include <linux/time.h>
35 #include <linux/quotaops.h>
37 #define MLOG_MASK_PREFIX ML_DLM_GLUE
38 #include <cluster/masklog.h>
41 #include "ocfs2_lockingver.h"
46 #include "extent_map.h"
48 #include "heartbeat.h"
51 #include "stackglue.h"
56 #include "refcounttree.h"
58 #include "buffer_head_io.h"
60 struct ocfs2_mask_waiter {
61 struct list_head mw_item;
63 struct completion mw_complete;
64 unsigned long mw_mask;
65 unsigned long mw_goal;
66 #ifdef CONFIG_OCFS2_FS_STATS
67 unsigned long long mw_lock_start;
71 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
72 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
73 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
74 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
77 * Return value from ->downconvert_worker functions.
79 * These control the precise actions of ocfs2_unblock_lock()
80 * and ocfs2_process_blocked_lock()
83 enum ocfs2_unblock_action {
84 UNBLOCK_CONTINUE = 0, /* Continue downconvert */
85 UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
86 * ->post_unlock callback */
87 UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
88 * ->post_unlock() callback. */
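/*
 * For illustration, the simplest possible ->downconvert_worker() shape
 * using these actions (the function and its flush helper are
 * hypothetical; real workers such as ocfs2_data_convert_worker() below
 * sync out cached state before allowing the downconvert):
 *
 *	static int example_convert_worker(struct ocfs2_lock_res *lockres,
 *					  int blocking)
 *	{
 *		example_flush_cached_state(lockres);
 *		return UNBLOCK_CONTINUE;
 *	}
 */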
91 struct ocfs2_unblock_ctl {
93 enum ocfs2_unblock_action unblock_action;
96 /* Lockdep class keys */
97 struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
99 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
101 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
103 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
106 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
109 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
110 struct ocfs2_lock_res *lockres);
112 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);
114 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
116 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
119 #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
121 /* This aids in debugging situations where a bad LVB might be involved. */
122 static void ocfs2_dump_meta_lvb_info(u64 level,
123 const char *function,
125 struct ocfs2_lock_res *lockres)
127 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
129 mlog(level, "LVB information for %s (called from %s:%u):\n",
130 lockres->l_name, function, line);
131 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
132 lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
133 be32_to_cpu(lvb->lvb_igeneration));
134 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
135 (unsigned long long)be64_to_cpu(lvb->lvb_isize),
136 be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
137 be16_to_cpu(lvb->lvb_imode));
138 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
139 "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
140 (long long)be64_to_cpu(lvb->lvb_iatime_packed),
141 (long long)be64_to_cpu(lvb->lvb_ictime_packed),
142 (long long)be64_to_cpu(lvb->lvb_imtime_packed),
143 be32_to_cpu(lvb->lvb_iattr));
148 * OCFS2 Lock Resource Operations
150 * These fine tune the behavior of the generic dlmglue locking infrastructure.
152 * The most basic of lock types can point ->l_priv to their respective
153 * struct ocfs2_super and allow the default actions to manage things.
155 * Right now, each lock type also needs to implement an init function,
156 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
 * should be called when the lock is no longer needed (i.e., object
 * destruction time).
160 struct ocfs2_lock_res_ops {
162 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
163 * this callback if ->l_priv is not an ocfs2_super pointer
165 struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
168 * Optionally called in the downconvert thread after a
169 * successful downconvert. The lockres will not be referenced
	 * after this callback is called, so it is safe to free
	 * memory, etc.
173 * The exact semantics of when this is called are controlled
174 * by ->downconvert_worker()
176 void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
179 * Allow a lock type to add checks to determine whether it is
180 * safe to downconvert a lock. Return 0 to re-queue the
181 * downconvert at a later time, nonzero to continue.
183 * For most locks, the default checks that there are no
184 * incompatible holders are sufficient.
186 * Called with the lockres spinlock held.
188 int (*check_downconvert)(struct ocfs2_lock_res *, int);
191 * Allows a lock type to populate the lock value block. This
192 * is called on downconvert, and when we drop a lock.
194 * Locks that want to use this should set LOCK_TYPE_USES_LVB
195 * in the flags field.
197 * Called with the lockres spinlock held.
199 void (*set_lvb)(struct ocfs2_lock_res *);
202 * Called from the downconvert thread when it is determined
203 * that a lock will be downconverted. This is called without
204 * any locks held so the function can do work that might
205 * schedule (syncing out data, etc).
207 * This should return any one of the ocfs2_unblock_action
208 * values, depending on what it wants the thread to do.
210 int (*downconvert_worker)(struct ocfs2_lock_res *, int);
213 * LOCK_TYPE_* flags which describe the specific requirements
214 * of a lock type. Descriptions of each individual flag follow.
220 * Some locks want to "refresh" potentially stale data when a
221 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
222 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
223 * individual lockres l_flags member from the ast function. It is
224 * expected that the locking wrapper will clear the
225 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
227 #define LOCK_TYPE_REQUIRES_REFRESH 0x1
230 * Indicate that a lock type makes use of the lock value block. The
231 * ->set_lvb lock type callback must be defined.
233 #define LOCK_TYPE_USES_LVB 0x2
235 static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
236 .get_osb = ocfs2_get_inode_osb,
240 static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
241 .get_osb = ocfs2_get_inode_osb,
242 .check_downconvert = ocfs2_check_meta_downconvert,
243 .set_lvb = ocfs2_set_meta_lvb,
244 .downconvert_worker = ocfs2_data_convert_worker,
245 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
248 static struct ocfs2_lock_res_ops ocfs2_super_lops = {
249 .flags = LOCK_TYPE_REQUIRES_REFRESH,
252 static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
256 static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
260 static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
261 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
264 static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
265 .get_osb = ocfs2_get_dentry_osb,
266 .post_unlock = ocfs2_dentry_post_unlock,
267 .downconvert_worker = ocfs2_dentry_convert_worker,
271 static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
272 .get_osb = ocfs2_get_inode_osb,
276 static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
277 .get_osb = ocfs2_get_file_osb,
281 static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
282 .set_lvb = ocfs2_set_qinfo_lvb,
283 .get_osb = ocfs2_get_qinfo_osb,
284 .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
287 static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
288 .check_downconvert = ocfs2_check_refcount_downconvert,
289 .downconvert_worker = ocfs2_refcount_convert_worker,
293 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
295 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
296 lockres->l_type == OCFS2_LOCK_TYPE_RW ||
297 lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
300 static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
302 BUG_ON(!ocfs2_is_inode_lock(lockres));
304 return (struct inode *) lockres->l_priv;
307 static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
309 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
311 return (struct ocfs2_dentry_lock *)lockres->l_priv;
314 static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
316 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);
318 return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
321 static inline struct ocfs2_refcount_tree *
322 ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
324 return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
327 static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
329 if (lockres->l_ops->get_osb)
330 return lockres->l_ops->get_osb(lockres);
332 return (struct ocfs2_super *)lockres->l_priv;
335 static int ocfs2_lock_create(struct ocfs2_super *osb,
336 struct ocfs2_lock_res *lockres,
339 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
341 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
342 struct ocfs2_lock_res *lockres,
343 int level, unsigned long caller_ip);
344 static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
345 struct ocfs2_lock_res *lockres,
348 __ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
351 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
352 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
353 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
354 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
355 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
356 struct ocfs2_lock_res *lockres);
357 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
359 #define ocfs2_log_dlm_error(_func, _err, _lockres) do { \
360 if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY) \
361 mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
362 _err, _func, _lockres->l_name); \
	else									\
		mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
365 _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \
366 (unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
368 static int ocfs2_downconvert_thread(void *arg);
369 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
370 struct ocfs2_lock_res *lockres);
371 static int ocfs2_inode_lock_update(struct inode *inode,
372 struct buffer_head **bh);
373 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
374 static inline int ocfs2_highest_compat_lock_level(int level);
375 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
377 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
378 struct ocfs2_lock_res *lockres,
381 unsigned int generation);
382 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
383 struct ocfs2_lock_res *lockres);
384 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
385 struct ocfs2_lock_res *lockres);
388 static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
397 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
399 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
400 ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
401 (long long)blkno, generation);
403 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
405 mlog(0, "built lock resource with name: %s\n", name);
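	/*
	 * Worked example, assuming OCFS2_LOCK_ID_PAD is a six-character
	 * "000000" pad: a META lock on block 5, generation 0, produces
	 * the 31-character name "M" "000000" "0000000000000005"
	 * "00000000" - 1 + 6 + 16 + 8 characters plus the terminating
	 * NUL, matching the BUG_ON() above.
	 */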
410 static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
412 static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
413 struct ocfs2_dlm_debug *dlm_debug)
415 mlog(0, "Add tracking for lockres %s\n", res->l_name);
417 spin_lock(&ocfs2_dlm_tracking_lock);
418 list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
419 spin_unlock(&ocfs2_dlm_tracking_lock);
422 static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
424 spin_lock(&ocfs2_dlm_tracking_lock);
425 if (!list_empty(&res->l_debug_list))
426 list_del_init(&res->l_debug_list);
427 spin_unlock(&ocfs2_dlm_tracking_lock);
430 #ifdef CONFIG_OCFS2_FS_STATS
431 static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
433 res->l_lock_num_prmode = 0;
434 res->l_lock_num_prmode_failed = 0;
435 res->l_lock_total_prmode = 0;
436 res->l_lock_max_prmode = 0;
437 res->l_lock_num_exmode = 0;
438 res->l_lock_num_exmode_failed = 0;
439 res->l_lock_total_exmode = 0;
440 res->l_lock_max_exmode = 0;
441 res->l_lock_refresh = 0;
444 static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
445 struct ocfs2_mask_waiter *mw, int ret)
447 unsigned long long *num, *sum;
448 unsigned int *max, *failed;
449 struct timespec ts = current_kernel_time();
450 unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start;
452 if (level == LKM_PRMODE) {
453 num = &res->l_lock_num_prmode;
454 sum = &res->l_lock_total_prmode;
455 max = &res->l_lock_max_prmode;
456 failed = &res->l_lock_num_prmode_failed;
457 } else if (level == LKM_EXMODE) {
458 num = &res->l_lock_num_exmode;
459 sum = &res->l_lock_total_exmode;
460 max = &res->l_lock_max_exmode;
461 failed = &res->l_lock_num_exmode_failed;
473 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
475 lockres->l_lock_refresh++;
478 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
480 struct timespec ts = current_kernel_time();
481 mw->mw_lock_start = timespec_to_ns(&ts);
484 static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
487 static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
488 int level, struct ocfs2_mask_waiter *mw, int ret)
491 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
494 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
499 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
500 struct ocfs2_lock_res *res,
501 enum ocfs2_lock_type type,
502 struct ocfs2_lock_res_ops *ops,
509 res->l_level = DLM_LOCK_IV;
510 res->l_requested = DLM_LOCK_IV;
511 res->l_blocking = DLM_LOCK_IV;
512 res->l_action = OCFS2_AST_INVALID;
513 res->l_unlock_action = OCFS2_UNLOCK_INVALID;
515 res->l_flags = OCFS2_LOCK_INITIALIZED;
517 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
519 ocfs2_init_lock_stats(res);
520 #ifdef CONFIG_DEBUG_LOCK_ALLOC
521 if (type != OCFS2_LOCK_TYPE_OPEN)
522 lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
523 &lockdep_keys[type], 0);
	else
		res->l_lockdep_map.key = NULL;
529 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
531 /* This also clears out the lock status block */
532 memset(res, 0, sizeof(struct ocfs2_lock_res));
533 spin_lock_init(&res->l_lock);
534 init_waitqueue_head(&res->l_event);
535 INIT_LIST_HEAD(&res->l_blocked_list);
536 INIT_LIST_HEAD(&res->l_mask_waiters);
539 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
540 enum ocfs2_lock_type type,
541 unsigned int generation,
544 struct ocfs2_lock_res_ops *ops;
547 case OCFS2_LOCK_TYPE_RW:
548 ops = &ocfs2_inode_rw_lops;
550 case OCFS2_LOCK_TYPE_META:
551 ops = &ocfs2_inode_inode_lops;
553 case OCFS2_LOCK_TYPE_OPEN:
554 ops = &ocfs2_inode_open_lops;
557 mlog_bug_on_msg(1, "type: %d\n", type);
558 ops = NULL; /* thanks, gcc */
562 ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
563 generation, res->l_name);
564 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
567 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
569 struct inode *inode = ocfs2_lock_res_inode(lockres);
571 return OCFS2_SB(inode->i_sb);
574 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
576 struct ocfs2_mem_dqinfo *info = lockres->l_priv;
578 return OCFS2_SB(info->dqi_gi.dqi_sb);
581 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
583 struct ocfs2_file_private *fp = lockres->l_priv;
585 return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
588 static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
590 __be64 inode_blkno_be;
592 memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
595 return be64_to_cpu(inode_blkno_be);
598 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
600 struct ocfs2_dentry_lock *dl = lockres->l_priv;
602 return OCFS2_SB(dl->dl_inode->i_sb);
605 void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
606 u64 parent, struct inode *inode)
609 u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
610 __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
611 struct ocfs2_lock_res *lockres = &dl->dl_lockres;
613 ocfs2_lock_res_init_once(lockres);
616 * Unfortunately, the standard lock naming scheme won't work
617 * here because we have two 16 byte values to use. Instead,
618 * we'll stuff the inode number as a binary value. We still
619 * want error prints to show something without garbling the
620 * display, so drop a null byte in there before the inode
621 * number. A future version of OCFS2 will likely use all
622 * binary lock names. The stringified names have been a
623 * tremendous aid in debugging, but now that the debugfs
624 * interface exists, we can mangle things there if need be.
626 * NOTE: We also drop the standard "pad" value (the total lock
627 * name size stays the same though - the last part is all
628 * zeros due to the memset in ocfs2_lock_res_init_once()
630 len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
632 ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
635 BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
637 memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
640 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
641 OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
645 static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
646 struct ocfs2_super *osb)
648 /* Superblock lockres doesn't come from a slab so we call init
649 * once on it manually. */
650 ocfs2_lock_res_init_once(res);
651 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
653 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
654 &ocfs2_super_lops, osb);
657 static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
658 struct ocfs2_super *osb)
660 /* Rename lockres doesn't come from a slab so we call init
661 * once on it manually. */
662 ocfs2_lock_res_init_once(res);
663 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
664 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
665 &ocfs2_rename_lops, osb);
668 static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
669 struct ocfs2_super *osb)
671 /* nfs_sync lockres doesn't come from a slab so we call init
672 * once on it manually. */
673 ocfs2_lock_res_init_once(res);
674 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
675 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
676 &ocfs2_nfs_sync_lops, osb);
679 static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
680 struct ocfs2_super *osb)
682 ocfs2_lock_res_init_once(res);
683 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
684 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
685 &ocfs2_orphan_scan_lops, osb);
688 void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
689 struct ocfs2_file_private *fp)
691 struct inode *inode = fp->fp_file->f_mapping->host;
692 struct ocfs2_inode_info *oi = OCFS2_I(inode);
694 ocfs2_lock_res_init_once(lockres);
695 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
696 inode->i_generation, lockres->l_name);
697 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
698 OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
700 lockres->l_flags |= OCFS2_LOCK_NOCACHE;
703 void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
704 struct ocfs2_mem_dqinfo *info)
706 ocfs2_lock_res_init_once(lockres);
707 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
709 ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
710 OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
714 void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
715 struct ocfs2_super *osb, u64 ref_blkno,
716 unsigned int generation)
718 ocfs2_lock_res_init_once(lockres);
719 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
720 generation, lockres->l_name);
721 ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
722 &ocfs2_refcount_block_lops, osb);
725 void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
729 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
732 ocfs2_remove_lockres_tracking(res);
734 mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
735 "Lockres %s is on the blocked list\n",
737 mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
738 "Lockres %s has mask waiters pending\n",
740 mlog_bug_on_msg(spin_is_locked(&res->l_lock),
741 "Lockres %s is locked\n",
743 mlog_bug_on_msg(res->l_ro_holders,
744 "Lockres %s has %u ro holders\n",
745 res->l_name, res->l_ro_holders);
746 mlog_bug_on_msg(res->l_ex_holders,
747 "Lockres %s has %u ex holders\n",
748 res->l_name, res->l_ex_holders);
750 /* Need to clear out the lock status block for the dlm */
751 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
757 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
766 lockres->l_ex_holders++;
769 lockres->l_ro_holders++;
778 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
787 BUG_ON(!lockres->l_ex_holders);
788 lockres->l_ex_holders--;
791 BUG_ON(!lockres->l_ro_holders);
792 lockres->l_ro_holders--;
800 /* WARNING: This function lives in a world where the only three lock
801 * levels are EX, PR, and NL. It *will* have to be adjusted when more
802 * lock types are added. */
803 static inline int ocfs2_highest_compat_lock_level(int level)
805 int new_level = DLM_LOCK_EX;
807 if (level == DLM_LOCK_EX)
808 new_level = DLM_LOCK_NL;
809 else if (level == DLM_LOCK_PR)
810 new_level = DLM_LOCK_PR;
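/*
 * In table form (blocking level -> highest level a local holder may
 * keep alongside it):
 *
 *	DLM_LOCK_EX -> DLM_LOCK_NL
 *	DLM_LOCK_PR -> DLM_LOCK_PR
 *	DLM_LOCK_NL -> DLM_LOCK_EX
 */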
814 static void lockres_set_flags(struct ocfs2_lock_res *lockres,
815 unsigned long newflags)
817 struct ocfs2_mask_waiter *mw, *tmp;
819 assert_spin_locked(&lockres->l_lock);
821 lockres->l_flags = newflags;
823 list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
824 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
827 list_del_init(&mw->mw_item);
829 complete(&mw->mw_complete);
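/*
 * Example of the mask/goal convention used above: callers that must
 * wait for BUSY to clear queue a waiter with mw_mask == OCFS2_LOCK_BUSY
 * and mw_goal == 0, so the completion fires on the first flag update
 * where (l_flags & OCFS2_LOCK_BUSY) == 0.
 */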
832 static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
834 lockres_set_flags(lockres, lockres->l_flags | or);
836 static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
839 lockres_set_flags(lockres, lockres->l_flags & ~clear);
842 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
846 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
847 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
848 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
849 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
851 lockres->l_level = lockres->l_requested;
852 if (lockres->l_level <=
853 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
854 lockres->l_blocking = DLM_LOCK_NL;
855 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
857 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
862 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
866 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
867 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
	/* Convert from RO to EX doesn't really need anything as our
	 * information is already up to date. Convert from NL to
	 * *anything* however should mark ourselves as needing an
	 * update. */
873 if (lockres->l_level == DLM_LOCK_NL &&
874 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
875 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
877 lockres->l_level = lockres->l_requested;
878 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
883 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
887 BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
888 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
890 if (lockres->l_requested > DLM_LOCK_NL &&
891 !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
892 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
893 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
895 lockres->l_level = lockres->l_requested;
896 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
897 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
902 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
905 int needs_downconvert = 0;
908 assert_spin_locked(&lockres->l_lock);
910 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
912 if (level > lockres->l_blocking) {
913 /* only schedule a downconvert if we haven't already scheduled
914 * one that goes low enough to satisfy the level we're
		 * blocking. This also catches the case where we get a
		 * bast at a level that our scheduled downconvert already
		 * satisfies. */
917 if (ocfs2_highest_compat_lock_level(level) <
918 ocfs2_highest_compat_lock_level(lockres->l_blocking))
919 needs_downconvert = 1;
921 lockres->l_blocking = level;
924 mlog_exit(needs_downconvert);
925 return needs_downconvert;
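/*
 * Worked example: a first PR bast on an unblocked lock (l_blocking at
 * NL) raises l_blocking to PR and schedules a downconvert
 * (highest_compat(PR) == PR is below highest_compat(NL) == EX). A
 * subsequent EX bast lowers the target again (highest_compat(EX) ==
 * NL), so another downconvert is scheduled and l_blocking becomes EX.
 * A duplicate bast at the same level fails the level > l_blocking test
 * and schedules nothing new.
 */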
929 * OCFS2_LOCK_PENDING and l_pending_gen.
931 * Why does OCFS2_LOCK_PENDING exist? To close a race between setting
932 * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock()
933 * for more details on the race.
935 * OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces
936 * a race on itself. In o2dlm, we can get the ast before ocfs2_dlm_lock()
937 * returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear
938 * OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns,
939 * the caller is going to try to clear PENDING again. If nothing else is
 * happening, __lockres_clear_pending() sees PENDING is unset and does
 * nothing.
943 * But what if another path (eg downconvert thread) has just started a
944 * new locking action? The other path has re-set PENDING. Our path
 * cannot clear PENDING, because that will re-open the original race
 * window.
 * An abbreviated trace of the race (two threads, time flowing down):
 *
 *	ocfs2_cluster_lock()
 *	  set BUSY, set PENDING
 *	  ocfs2_dlm_lock()
 *
 *	ocfs2_locking_ast()		ocfs2_downconvert_thread()
 *	  clear PENDING			  ocfs2_unblock_lock()
 *					    ocfs2_prepare_downconvert()
 *					      set BUSY, re-set PENDING
 *
 *	ocfs2_cluster_lock(), resumed:
 *	  clear PENDING			  (ocfs2_dlm_lock() not yet called)
970 * So as you can see, we now have a window where l_lock is not held,
971 * PENDING is not set, and ocfs2_dlm_lock() has not been called.
973 * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
974 * set by ocfs2_prepare_downconvert(). That wasn't nice.
976 * To solve this we introduce l_pending_gen. A call to
977 * lockres_clear_pending() will only do so when it is passed a generation
978 * number that matches the lockres. lockres_set_pending() will return the
979 * current generation number. When ocfs2_cluster_lock() goes to clear
980 * PENDING, it passes the generation it got from set_pending(). In our
981 * example above, the generation numbers will *not* match. Thus,
982 * ocfs2_cluster_lock() will not clear the PENDING set by
983 * ocfs2_prepare_downconvert().
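/*
 * The resulting protocol, as used by ocfs2_lock_create() and
 * __ocfs2_cluster_lock() below, is (sketch):
 *
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
 *	gen = lockres_set_pending(lockres);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *
 *	ret = ocfs2_dlm_lock(...);
 *
 *	lockres_clear_pending(lockres, gen, osb);
 */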
986 /* Unlocked version for ocfs2_locking_ast() */
987 static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
988 unsigned int generation,
989 struct ocfs2_super *osb)
991 assert_spin_locked(&lockres->l_lock);
994 * The ast and locking functions can race us here. The winner
995 * will clear pending, the loser will not.
997 if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
998 (lockres->l_pending_gen != generation))
1001 lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
1002 lockres->l_pending_gen++;
1005 * The downconvert thread may have skipped us because we
1006 * were PENDING. Wake it up.
1008 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1009 ocfs2_wake_downconvert_thread(osb);
1012 /* Locked version for callers of ocfs2_dlm_lock() */
1013 static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
1014 unsigned int generation,
1015 struct ocfs2_super *osb)
1017 unsigned long flags;
1019 spin_lock_irqsave(&lockres->l_lock, flags);
1020 __lockres_clear_pending(lockres, generation, osb);
1021 spin_unlock_irqrestore(&lockres->l_lock, flags);
1024 static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
1026 assert_spin_locked(&lockres->l_lock);
1027 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
1029 lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
1031 return lockres->l_pending_gen;
1035 static void ocfs2_blocking_ast(void *opaque, int level)
1037 struct ocfs2_lock_res *lockres = opaque;
1038 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1039 int needs_downconvert;
1040 unsigned long flags;
1042 BUG_ON(level <= DLM_LOCK_NL);
1044 mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
1045 lockres->l_name, level, lockres->l_level,
1046 ocfs2_lock_type_string(lockres->l_type));
1049 * We can skip the bast for locks which don't enable caching -
1050 * they'll be dropped at the earliest possible time anyway.
1052 if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
1055 spin_lock_irqsave(&lockres->l_lock, flags);
1056 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
1057 if (needs_downconvert)
1058 ocfs2_schedule_blocked_lock(osb, lockres);
1059 spin_unlock_irqrestore(&lockres->l_lock, flags);
1061 wake_up(&lockres->l_event);
1063 ocfs2_wake_downconvert_thread(osb);
1066 static void ocfs2_locking_ast(void *opaque)
1068 struct ocfs2_lock_res *lockres = opaque;
1069 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1070 unsigned long flags;
1073 spin_lock_irqsave(&lockres->l_lock, flags);
1075 status = ocfs2_dlm_lock_status(&lockres->l_lksb);
1077 if (status == -EAGAIN) {
1078 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1083 mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
1084 lockres->l_name, status);
1085 spin_unlock_irqrestore(&lockres->l_lock, flags);
	switch (lockres->l_action) {
1090 case OCFS2_AST_ATTACH:
1091 ocfs2_generic_handle_attach_action(lockres);
1092 lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
1094 case OCFS2_AST_CONVERT:
1095 ocfs2_generic_handle_convert_action(lockres);
1097 case OCFS2_AST_DOWNCONVERT:
1098 ocfs2_generic_handle_downconvert_action(lockres);
1101 mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
1102 "lockres flags = 0x%lx, unlock action: %u\n",
1103 lockres->l_name, lockres->l_action, lockres->l_flags,
1104 lockres->l_unlock_action);
	/* set it to something invalid so if we get called again we
	 * can catch it. */
1110 lockres->l_action = OCFS2_AST_INVALID;
1112 /* Did we try to cancel this lock? Clear that state */
1113 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
1114 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1117 * We may have beaten the locking functions here. We certainly
1118 * know that dlm_lock() has been called :-)
1119 * Because we can't have two lock calls in flight at once, we
1120 * can use lockres->l_pending_gen.
1122 __lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
1124 wake_up(&lockres->l_event);
1125 spin_unlock_irqrestore(&lockres->l_lock, flags);
1128 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1131 unsigned long flags;
1134 spin_lock_irqsave(&lockres->l_lock, flags);
1135 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1137 lockres->l_action = OCFS2_AST_INVALID;
1139 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1140 spin_unlock_irqrestore(&lockres->l_lock, flags);
1142 wake_up(&lockres->l_event);
1146 /* Note: If we detect another process working on the lock (i.e.,
1147 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
1148 * to do the right thing in that case.
1150 static int ocfs2_lock_create(struct ocfs2_super *osb,
1151 struct ocfs2_lock_res *lockres,
1156 unsigned long flags;
1161 mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
1164 spin_lock_irqsave(&lockres->l_lock, flags);
1165 if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
1166 (lockres->l_flags & OCFS2_LOCK_BUSY)) {
1167 spin_unlock_irqrestore(&lockres->l_lock, flags);
1171 lockres->l_action = OCFS2_AST_ATTACH;
1172 lockres->l_requested = level;
1173 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1174 gen = lockres_set_pending(lockres);
1175 spin_unlock_irqrestore(&lockres->l_lock, flags);
1177 ret = ocfs2_dlm_lock(osb->cconn,
1182 OCFS2_LOCK_ID_MAX_LEN - 1,
1184 lockres_clear_pending(lockres, gen, osb);
1186 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1187 ocfs2_recover_from_dlm_error(lockres, 1);
1190 mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
1197 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
1200 unsigned long flags;
1203 spin_lock_irqsave(&lockres->l_lock, flags);
1204 ret = lockres->l_flags & flag;
1205 spin_unlock_irqrestore(&lockres->l_lock, flags);
1210 static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
1213 wait_event(lockres->l_event,
1214 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
1217 static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
1220 wait_event(lockres->l_event,
1221 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
1224 /* predict what lock level we'll be dropping down to on behalf
1225 * of another node, and return true if the currently wanted
1226 * level will be compatible with it. */
1227 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
1230 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
1232 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
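/*
 * Example: if another node has us blocked wanting PR, we will
 * downconvert to PR at most, so a local request for PR may continue
 * while a local request for EX must wait for the downconvert to
 * finish.
 */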
1235 static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
1237 INIT_LIST_HEAD(&mw->mw_item);
1238 init_completion(&mw->mw_complete);
1239 ocfs2_init_start_time(mw);
1242 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
1244 wait_for_completion(&mw->mw_complete);
1245 /* Re-arm the completion in case we want to wait on it again */
1246 INIT_COMPLETION(mw->mw_complete);
1247 return mw->mw_status;
1250 static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
1251 struct ocfs2_mask_waiter *mw,
1255 BUG_ON(!list_empty(&mw->mw_item));
1257 assert_spin_locked(&lockres->l_lock);
1259 list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
1264 /* returns 0 if the mw that was removed was already satisfied, -EBUSY
1265 * if the mask still hadn't reached its goal */
1266 static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
1267 struct ocfs2_mask_waiter *mw)
1269 unsigned long flags;
1272 spin_lock_irqsave(&lockres->l_lock, flags);
1273 if (!list_empty(&mw->mw_item)) {
1274 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
1277 list_del_init(&mw->mw_item);
1278 init_completion(&mw->mw_complete);
1280 spin_unlock_irqrestore(&lockres->l_lock, flags);
1286 static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
1287 struct ocfs2_lock_res *lockres)
1291 ret = wait_for_completion_interruptible(&mw->mw_complete);
1293 lockres_remove_mask_waiter(lockres, mw);
1295 ret = mw->mw_status;
1296 /* Re-arm the completion in case we want to wait on it again */
1297 INIT_COMPLETION(mw->mw_complete);
1301 static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1302 struct ocfs2_lock_res *lockres,
1307 unsigned long caller_ip)
1309 struct ocfs2_mask_waiter mw;
1310 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
1311 int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
1312 unsigned long flags;
1314 int noqueue_attempted = 0;
1318 ocfs2_init_mask_waiter(&mw);
1320 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
1321 lkm_flags |= DLM_LKF_VALBLK;
1326 if (catch_signals && signal_pending(current)) {
1331 spin_lock_irqsave(&lockres->l_lock, flags);
1333 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
1334 "Cluster lock called on freeing lockres %s! flags "
1335 "0x%lx\n", lockres->l_name, lockres->l_flags);
1337 /* We only compare against the currently granted level
1338 * here. If the lock is blocked waiting on a downconvert,
1339 * we'll get caught below. */
1340 if (lockres->l_flags & OCFS2_LOCK_BUSY &&
1341 level > lockres->l_level) {
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
1344 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1349 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
1350 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
		/* is the lock currently blocked on behalf of
		 * another node? */
1353 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
1358 if (level > lockres->l_level) {
1359 if (noqueue_attempted > 0) {
1363 if (lkm_flags & DLM_LKF_NOQUEUE)
1364 noqueue_attempted = 1;
1366 if (lockres->l_action != OCFS2_AST_INVALID)
1367 mlog(ML_ERROR, "lockres %s has action %u pending\n",
1368 lockres->l_name, lockres->l_action);
1370 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1371 lockres->l_action = OCFS2_AST_ATTACH;
1372 lkm_flags &= ~DLM_LKF_CONVERT;
		} else {
			lockres->l_action = OCFS2_AST_CONVERT;
1375 lkm_flags |= DLM_LKF_CONVERT;
1378 lockres->l_requested = level;
1379 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1380 gen = lockres_set_pending(lockres);
1381 spin_unlock_irqrestore(&lockres->l_lock, flags);
1383 BUG_ON(level == DLM_LOCK_IV);
1384 BUG_ON(level == DLM_LOCK_NL);
1386 mlog(0, "lock %s, convert from %d to level = %d\n",
1387 lockres->l_name, lockres->l_level, level);
1389 /* call dlm_lock to upgrade lock now */
1390 ret = ocfs2_dlm_lock(osb->cconn,
1395 OCFS2_LOCK_ID_MAX_LEN - 1,
1397 lockres_clear_pending(lockres, gen, osb);
1399 if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
1401 ocfs2_log_dlm_error("ocfs2_dlm_lock",
1404 ocfs2_recover_from_dlm_error(lockres, 1);
1408 mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
1411 /* At this point we've gone inside the dlm and need to
1412 * complete our work regardless. */
1415 /* wait for busy to clear and carry on */
1419 /* Ok, if we get here then we're good to go. */
1420 ocfs2_inc_holders(lockres, level);
1424 spin_unlock_irqrestore(&lockres->l_lock, flags);
1427 * This is helping work around a lock inversion between the page lock
1428 * and dlm locks. One path holds the page lock while calling aops
1429 * which block acquiring dlm locks. The voting thread holds dlm
1430 * locks while acquiring page locks while down converting data locks.
1431 * This block is helping an aop path notice the inversion and back
1432 * off to unlock its page lock before trying the dlm lock again.
1434 if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1435 mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1437 if (lockres_remove_mask_waiter(lockres, &mw))
1443 ret = ocfs2_wait_for_mask(&mw);
1448 ocfs2_update_lock_stats(lockres, level, &mw, ret);
1450 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1451 if (!ret && lockres->l_lockdep_map.key != NULL) {
1452 if (level == DLM_LOCK_PR)
1453 rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
1454 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1457 rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
1458 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1466 static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
1467 struct ocfs2_lock_res *lockres,
1472 return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
1477 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1478 struct ocfs2_lock_res *lockres,
1480 unsigned long caller_ip)
1482 unsigned long flags;
1485 spin_lock_irqsave(&lockres->l_lock, flags);
1486 ocfs2_dec_holders(lockres, level);
1487 ocfs2_downconvert_on_unlock(osb, lockres);
1488 spin_unlock_irqrestore(&lockres->l_lock, flags);
1489 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1490 if (lockres->l_lockdep_map.key != NULL)
1491 rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
1496 static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1497 struct ocfs2_lock_res *lockres,
1501 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1502 unsigned long flags;
1503 u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
1505 spin_lock_irqsave(&lockres->l_lock, flags);
1506 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1507 lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1508 spin_unlock_irqrestore(&lockres->l_lock, flags);
1510 return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1513 /* Grants us an EX lock on the data and metadata resources, skipping
1514 * the normal cluster directory lookup. Use this ONLY on newly created
1515 * inodes which other nodes can't possibly see, and which haven't been
1516 * hashed in the inode hash yet. This can give us a good performance
1517 * increase as it'll skip the network broadcast normally associated
1518 * with creating a new lock resource. */
1519 int ocfs2_create_new_inode_locks(struct inode *inode)
1522 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1525 BUG_ON(!ocfs2_inode_is_new(inode));
1529 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1531 /* NOTE: That we don't increment any of the holder counts, nor
1532 * do we add anything to a journal handle. Since this is
1533 * supposed to be a new inode which the cluster doesn't know
1534 * about yet, there is no need to. As far as the LVB handling
1535 * is concerned, this is basically like acquiring an EX lock
1536 * on a resource which has an invalid one -- we'll set it
1537 * valid when we release the EX. */
1539 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1546 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
1547 * don't use a generation in their lock names.
1549 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
1555 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1566 int ocfs2_rw_lock(struct inode *inode, int write)
1569 struct ocfs2_lock_res *lockres;
1570 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1576 mlog(0, "inode %llu take %s RW lock\n",
1577 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1578 write ? "EXMODE" : "PRMODE");
1580 if (ocfs2_mount_local(osb))
1583 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1585 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1587 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1596 void ocfs2_rw_unlock(struct inode *inode, int write)
1598 int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1599 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1600 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1604 mlog(0, "inode %llu drop %s RW lock\n",
1605 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1606 write ? "EXMODE" : "PRMODE");
1608 if (!ocfs2_mount_local(osb))
1609 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
 * ocfs2_open_lock always gets a PR mode lock.
1617 int ocfs2_open_lock(struct inode *inode)
1620 struct ocfs2_lock_res *lockres;
1621 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1627 mlog(0, "inode %llu take PRMODE open lock\n",
1628 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1630 if (ocfs2_mount_local(osb))
1633 lockres = &OCFS2_I(inode)->ip_open_lockres;
1635 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1645 int ocfs2_try_open_lock(struct inode *inode, int write)
1647 int status = 0, level;
1648 struct ocfs2_lock_res *lockres;
1649 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1655 mlog(0, "inode %llu try to take %s open lock\n",
1656 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1657 write ? "EXMODE" : "PRMODE");
1659 if (ocfs2_mount_local(osb))
1662 lockres = &OCFS2_I(inode)->ip_open_lockres;
1664 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
	 * The file system may already be holding a PRMODE/EXMODE open lock.
1668 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
1669 * other nodes and the -EAGAIN will indicate to the caller that
1670 * this inode is still in use.
1672 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1673 level, DLM_LKF_NOQUEUE, 0);
 * ocfs2_open_unlock unlocks PR and EX mode open locks.
1683 void ocfs2_open_unlock(struct inode *inode)
1685 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1686 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1690 mlog(0, "inode %llu drop open lock\n",
1691 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1693 if (ocfs2_mount_local(osb))
	if (lockres->l_ro_holders)
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
				     DLM_LOCK_PR);
	if (lockres->l_ex_holders)
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
				     DLM_LOCK_EX);
1707 static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
1711 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1712 unsigned long flags;
1713 struct ocfs2_mask_waiter mw;
1715 ocfs2_init_mask_waiter(&mw);
1718 spin_lock_irqsave(&lockres->l_lock, flags);
1719 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
1720 ret = ocfs2_prepare_cancel_convert(osb, lockres);
1722 spin_unlock_irqrestore(&lockres->l_lock, flags);
1723 ret = ocfs2_cancel_convert(osb, lockres);
1730 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1731 spin_unlock_irqrestore(&lockres->l_lock, flags);
1733 ocfs2_wait_for_mask(&mw);
1739 * We may still have gotten the lock, in which case there's no
1740 * point to restarting the syscall.
1742 if (lockres->l_level == level)
1745 mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
1746 lockres->l_flags, lockres->l_level, lockres->l_action);
1748 spin_unlock_irqrestore(&lockres->l_lock, flags);
1755 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
1756 * flock() calls. The locking approach this requires is sufficiently
1757 * different from all other cluster lock types that we implement a
 * separate path to the "low-level" dlm calls. In particular:
 * - No optimization of lock levels is done - we take exactly
 *   what's been requested.
1763 * - No lock caching is employed. We immediately downconvert to
1764 * no-lock at unlock time. This also means flock locks never go on
 *   the blocking list.
1767 * - Since userspace can trivially deadlock itself with flock, we make
 *   sure to allow cancellation of a misbehaving application's flock()
 *   request.
1771 * - Access to any flock lockres doesn't require concurrency, so we
1772 * can simplify the code by requiring the caller to guarantee
1773 * serialization of dlmglue flock calls.
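/*
 * Usage sketch (the flock(2) mapping shown is illustrative): a flock
 * handler translates LOCK_EX to ex == 1 and LOCK_NB to trylock == 1,
 * then calls
 *
 *	ret = ocfs2_file_lock(file, ex, trylock);
 *
 * where -EAGAIN reports a failed trylock; the lock is later dropped
 * via ocfs2_file_unlock(file).
 */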
1775 int ocfs2_file_lock(struct file *file, int ex, int trylock)
1777 int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1778 unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
1779 unsigned long flags;
1780 struct ocfs2_file_private *fp = file->private_data;
1781 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1782 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1783 struct ocfs2_mask_waiter mw;
1785 ocfs2_init_mask_waiter(&mw);
1787 if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
1788 (lockres->l_level > DLM_LOCK_NL)) {
1790 "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
1791 "level: %u\n", lockres->l_name, lockres->l_flags,
1796 spin_lock_irqsave(&lockres->l_lock, flags);
1797 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1798 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1799 spin_unlock_irqrestore(&lockres->l_lock, flags);
1802 * Get the lock at NLMODE to start - that way we
1803 * can cancel the upconvert request if need be.
1805 ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
1811 ret = ocfs2_wait_for_mask(&mw);
1816 spin_lock_irqsave(&lockres->l_lock, flags);
1819 lockres->l_action = OCFS2_AST_CONVERT;
1820 lkm_flags |= DLM_LKF_CONVERT;
1821 lockres->l_requested = level;
1822 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1824 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1825 spin_unlock_irqrestore(&lockres->l_lock, flags);
1827 ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
1828 lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
1831 if (!trylock || (ret != -EAGAIN)) {
1832 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1836 ocfs2_recover_from_dlm_error(lockres, 1);
1837 lockres_remove_mask_waiter(lockres, &mw);
1841 ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
1842 if (ret == -ERESTARTSYS) {
1844 * Userspace can cause deadlock itself with
1845 * flock(). Current behavior locally is to allow the
1846 * deadlock, but abort the system call if a signal is
1847 * received. We follow this example, otherwise a
1848 * poorly written program could sit in kernel until
1851 * Handling this is a bit more complicated for Ocfs2
1852 * though. We can't exit this function with an
1853 * outstanding lock request, so a cancel convert is
1854 * required. We intentionally overwrite 'ret' - if the
1855 * cancel fails and the lock was granted, it's easier
	 * to just bubble success back up to the user.
1858 ret = ocfs2_flock_handle_signal(lockres, level);
1859 } else if (!ret && (level > lockres->l_level)) {
1860 /* Trylock failed asynchronously */
1867 mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
1868 lockres->l_name, ex, trylock, ret);
1872 void ocfs2_file_unlock(struct file *file)
1876 unsigned long flags;
1877 struct ocfs2_file_private *fp = file->private_data;
1878 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1879 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1880 struct ocfs2_mask_waiter mw;
1882 ocfs2_init_mask_waiter(&mw);
1884 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
1887 if (lockres->l_level == DLM_LOCK_NL)
1890 mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
1891 lockres->l_name, lockres->l_flags, lockres->l_level,
1894 spin_lock_irqsave(&lockres->l_lock, flags);
1896 * Fake a blocking ast for the downconvert code.
1898 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
1899 lockres->l_blocking = DLM_LOCK_EX;
1901 gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
1902 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1903 spin_unlock_irqrestore(&lockres->l_lock, flags);
1905 ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
1911 ret = ocfs2_wait_for_mask(&mw);
1916 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
1917 struct ocfs2_lock_res *lockres)
	/* If we know that another node is waiting on our lock, kick
	 * the downconvert thread pre-emptively when we reach a release
	 * condition. */
1926 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
		switch (lockres->l_blocking) {
1929 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
1933 if (!lockres->l_ex_holders)
1942 ocfs2_wake_downconvert_thread(osb);
1947 #define OCFS2_SEC_BITS 34
1948 #define OCFS2_SEC_SHIFT (64 - 34)
1949 #define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
/* LVB only has room for 64 bits of time here so we pack it for
 * ourselves. */
1953 static u64 ocfs2_pack_timespec(struct timespec *spec)
1956 u64 sec = spec->tv_sec;
1957 u32 nsec = spec->tv_nsec;
1959 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
1964 /* Call this with the lockres locked. I am reasonably sure we don't
1965 * need ip_lock in this function as anyone who would be changing those
1966 * values is supposed to be blocked in ocfs2_inode_lock right now. */
1967 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
1969 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1970 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
1971 struct ocfs2_meta_lvb *lvb;
1975 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
1978 * Invalidate the LVB of a deleted inode - this way other
	 * nodes are forced to go to disk and discover the new inode
	 * status.
1982 if (oi->ip_flags & OCFS2_INODE_DELETED) {
1983 lvb->lvb_version = 0;
1987 lvb->lvb_version = OCFS2_LVB_VERSION;
1988 lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
1989 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
1990 lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
1991 lvb->lvb_igid = cpu_to_be32(inode->i_gid);
1992 lvb->lvb_imode = cpu_to_be16(inode->i_mode);
1993 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
1994 lvb->lvb_iatime_packed =
1995 cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
1996 lvb->lvb_ictime_packed =
1997 cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
1998 lvb->lvb_imtime_packed =
1999 cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
2000 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
2001 lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
2002 lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
2005 mlog_meta_lvb(0, lockres);
2010 static void ocfs2_unpack_timespec(struct timespec *spec,
2013 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
2014 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
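/*
 * Worked example: with OCFS2_SEC_SHIFT == 30, tv_sec == 1 and
 * tv_nsec == 5 pack to (1ULL << 30) | 5 == 0x40000005. Unpacking
 * shifts the seconds back out and masks off the low 30 bits for the
 * nanoseconds; since tv_nsec < 10^9 < 2^30, the nanoseconds always fit
 * under OCFS2_NSEC_MASK.
 */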
2017 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
2019 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2020 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2021 struct ocfs2_meta_lvb *lvb;
2025 mlog_meta_lvb(0, lockres);
2027 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2029 /* We're safe here without the lockres lock... */
2030 spin_lock(&oi->ip_lock);
2031 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
2032 i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
2034 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
2035 oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
2036 ocfs2_set_inode_flags(inode);
2038 /* fast-symlinks are a special case */
2039 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
2040 inode->i_blocks = 0;
	else
		inode->i_blocks = ocfs2_inode_sector_count(inode);
2044 inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
2045 inode->i_gid = be32_to_cpu(lvb->lvb_igid);
2046 inode->i_mode = be16_to_cpu(lvb->lvb_imode);
2047 inode->i_nlink = be16_to_cpu(lvb->lvb_inlink);
2048 ocfs2_unpack_timespec(&inode->i_atime,
2049 be64_to_cpu(lvb->lvb_iatime_packed));
2050 ocfs2_unpack_timespec(&inode->i_mtime,
2051 be64_to_cpu(lvb->lvb_imtime_packed));
2052 ocfs2_unpack_timespec(&inode->i_ctime,
2053 be64_to_cpu(lvb->lvb_ictime_packed));
2054 spin_unlock(&oi->ip_lock);
2059 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
2060 struct ocfs2_lock_res *lockres)
2062 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2064 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
2065 && lvb->lvb_version == OCFS2_LVB_VERSION
2066 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
2071 /* Determine whether a lock resource needs to be refreshed, and
2072 * arbitrate who gets to refresh it.
2074 * 0 means no refresh needed.
2076 * > 0 means you need to refresh this and you MUST call
2077 * ocfs2_complete_lock_res_refresh afterwards. */
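/*
 * The expected caller pattern (see ocfs2_inode_lock_update() below for
 * the real thing):
 *
 *	if (!ocfs2_should_refresh_lock_res(lockres))
 *		goto done;
 *	status = ...reload state from the LVB or from disk...;
 *	ocfs2_complete_lock_res_refresh(lockres, status);
 */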
2078 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
2080 unsigned long flags;
2086 spin_lock_irqsave(&lockres->l_lock, flags);
2087 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
2088 spin_unlock_irqrestore(&lockres->l_lock, flags);
2092 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
2093 spin_unlock_irqrestore(&lockres->l_lock, flags);
2095 ocfs2_wait_on_refreshing_lock(lockres);
2099 /* Ok, I'll be the one to refresh this lock. */
2100 lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
2101 spin_unlock_irqrestore(&lockres->l_lock, flags);
/* If status is nonzero, I'll mark it as not being in refresh
 * anymore, but I won't clear the needs refresh flag. */
2111 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
2114 unsigned long flags;
2117 spin_lock_irqsave(&lockres->l_lock, flags);
2118 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
2120 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
2121 spin_unlock_irqrestore(&lockres->l_lock, flags);
2123 wake_up(&lockres->l_event);
2128 /* may or may not return a bh if it went to disk. */
2129 static int ocfs2_inode_lock_update(struct inode *inode,
2130 struct buffer_head **bh)
2133 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2134 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2135 struct ocfs2_dinode *fe;
2136 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2140 if (ocfs2_mount_local(osb))
2143 spin_lock(&oi->ip_lock);
2144 if (oi->ip_flags & OCFS2_INODE_DELETED) {
2145 mlog(0, "Orphaned inode %llu was deleted while we "
2146 "were waiting on a lock. ip_flags = 0x%x\n",
2147 (unsigned long long)oi->ip_blkno, oi->ip_flags);
2148 spin_unlock(&oi->ip_lock);
2152 spin_unlock(&oi->ip_lock);
2154 if (!ocfs2_should_refresh_lock_res(lockres))
2157 /* This will discard any caching information we might have had
2158 * for the inode metadata. */
2159 ocfs2_metadata_cache_purge(INODE_CACHE(inode));
2161 ocfs2_extent_map_trunc(inode, 0);
2163 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
2164 mlog(0, "Trusting LVB on inode %llu\n",
2165 (unsigned long long)oi->ip_blkno);
2166 ocfs2_refresh_inode_from_lvb(inode);
2167 } else {
2168 /* Boo, we have to go to disk. */
2169 /* read bh, cast, ocfs2_refresh_inode */
2170 status = ocfs2_read_inode_block(inode, bh);
2175 fe = (struct ocfs2_dinode *) (*bh)->b_data;
2177 /* This is a good chance to make sure we're not
2178 * locking an invalid object. ocfs2_read_inode_block()
2179 * already checked that the inode block is sane.
2181 * We bug on a stale inode here because we checked
2182 * above whether it was wiped from disk. The wiping
2183 * node provides a guarantee that we receive that
2184 * message and can mark the inode before dropping any
2185 * locks associated with it. */
2186 mlog_bug_on_msg(inode->i_generation !=
2187 le32_to_cpu(fe->i_generation),
2188 "Invalid dinode %llu disk generation: %u "
2189 "inode->i_generation: %u\n",
2190 (unsigned long long)oi->ip_blkno,
2191 le32_to_cpu(fe->i_generation),
2192 inode->i_generation);
2193 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
2194 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
2195 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
2196 (unsigned long long)oi->ip_blkno,
2197 (unsigned long long)le64_to_cpu(fe->i_dtime),
2198 le32_to_cpu(fe->i_flags));
2200 ocfs2_refresh_inode(inode, fe);
2201 ocfs2_track_lock_refresh(lockres);
2206 ocfs2_complete_lock_res_refresh(lockres, status);
2212 static int ocfs2_assign_bh(struct inode *inode,
2213 struct buffer_head **ret_bh,
2214 struct buffer_head *passed_bh)
2219 /* Ok, the update went to disk for us, use the
2220 * returned bh. */
2221 *ret_bh = passed_bh;
2227 status = ocfs2_read_inode_block(inode, ret_bh);
2235 * returns < 0 error if the callback will never be called, otherwise
2236 * the result of the lock will be communicated via the callback.
2238 int ocfs2_inode_lock_full_nested(struct inode *inode,
2239 struct buffer_head **ret_bh,
2244 int status, level, acquired;
2246 struct ocfs2_lock_res *lockres = NULL;
2247 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2248 struct buffer_head *local_bh = NULL;
2254 mlog(0, "inode %llu, take %s META lock\n",
2255 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2256 ex ? "EXMODE" : "PRMODE");
2260 /* We'll allow faking a readonly metadata lock for
2261 * rodevices. */
2262 if (ocfs2_is_hard_readonly(osb)) {
2268 if (ocfs2_mount_local(osb))
2271 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2272 ocfs2_wait_for_recovery(osb);
2274 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2275 level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2277 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
2278 dlm_flags |= DLM_LKF_NOQUEUE;
2280 status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
2281 arg_flags, subclass, _RET_IP_);
2283 if (status != -EAGAIN && status != -EIOCBRETRY)
2288 /* Notify the error cleanup path to drop the cluster lock. */
2291 /* We wait twice because a node may have died while we were in
2292 * the lower dlm layers. The second time though, we've
2293 * committed to owning this lock so we don't allow signals to
2294 * abort the operation. */
2295 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2296 ocfs2_wait_for_recovery(osb);
2300 * We only see this flag if we're being called from
2301 * ocfs2_read_locked_inode(). It means we're locking an inode
2302 * which hasn't been populated yet, so clear the refresh flag
2303 * and let the caller handle it.
2305 if (inode->i_state & I_NEW) {
2308 ocfs2_complete_lock_res_refresh(lockres, 0);
2312 /* This is fun. The caller may want a bh back, or it may
2313 * not. ocfs2_inode_lock_update definitely wants one in, but
2314 * may or may not read one, depending on what's in the
2315 * LVB. The result of all of this is that we've *only* gone to
2316 * disk if we have to, so the complexity is worthwhile. */
2317 status = ocfs2_inode_lock_update(inode, &local_bh);
2319 if (status != -ENOENT)
2325 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2334 if (ret_bh && (*ret_bh)) {
2339 ocfs2_inode_unlock(inode, ex);
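/*
 * A minimal caller sketch (hypothetical) of the usual pattern around the
 * inode cluster lock, assuming the plain ocfs2_inode_lock() wrapper maps
 * to the _full variant with no special arg_flags:
 */
static int example_inode_lock_user(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	int status;

	status = ocfs2_inode_lock(inode, &bh, 1);	/* 1 == EX */
	if (status < 0)
		return status;

	/* ... use the validated dinode in bh under the cluster lock ... */

	ocfs2_inode_unlock(inode, 1);
	brelse(bh);

	return 0;
}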
2350 * This is working around a lock inversion between tasks acquiring DLM
2351 * locks while holding a page lock and the downconvert thread which
2352 * blocks dlm lock acquiry while acquiring page locks.
2354 * ** These _with_page variants are only intended to be called from aop
2355 * methods that hold page locks and return a very specific *positive* error
2356 * code that aop methods pass up to the VFS -- test for errors with != 0. **
2358 * The DLM is called such that it returns -EAGAIN if it would have
2359 * blocked waiting for the downconvert thread. In that case we unlock
2360 * our page so the downconvert thread can make progress. Once we've
2361 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
2362 * that called us can bubble that back up into the VFS who will then
2363 * immediately retry the aop call.
2365 * We do a blocking lock and immediate unlock before returning, though, so that
2366 * the lock has a great chance of being cached on this node by the time the VFS
2367 * calls back to retry the aop. This has the potential to livelock as nodes
2368 * ping locks back and forth, but that's a risk we're willing to take in
2369 * order to resolve the lock inversion simply.
2371 int ocfs2_inode_lock_with_page(struct inode *inode,
2372 struct buffer_head **ret_bh,
2378 ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
2379 if (ret == -EAGAIN) {
2381 if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
2382 ocfs2_inode_unlock(inode, ex);
2383 ret = AOP_TRUNCATED_PAGE;
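/*
 * A hypothetical sketch of the calling convention described above, as an
 * aop method might use it. The function name and the assumption that the
 * trailing arguments are the lock level and the locked page are
 * illustrative, not taken from this file.
 */
static int example_aop_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0)
		return ret;	/* possibly AOP_TRUNCATED_PAGE; VFS retries */

	/* ... fill the page while holding the cluster lock ... */

	ocfs2_inode_unlock(inode, 0);
	return 0;
}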
2389 int ocfs2_inode_lock_atime(struct inode *inode,
2390 struct vfsmount *vfsmnt,
2396 ret = ocfs2_inode_lock(inode, NULL, 0);
2403 * If we should update atime, we will get an EX lock;
2404 * otherwise we just get a PR lock.
2406 if (ocfs2_should_update_atime(inode, vfsmnt)) {
2407 struct buffer_head *bh = NULL;
2409 ocfs2_inode_unlock(inode, 0);
2410 ret = ocfs2_inode_lock(inode, &bh, 1);
2416 if (ocfs2_should_update_atime(inode, vfsmnt))
2417 ocfs2_update_inode_atime(inode, bh);
2427 void ocfs2_inode_unlock(struct inode *inode,
2430 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2431 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
2432 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2436 mlog(0, "inode %llu drop %s META lock\n",
2437 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2438 ex ? "EXMODE" : "PRMODE");
2440 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
2441 !ocfs2_mount_local(osb))
2442 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
2447 int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
2449 struct ocfs2_lock_res *lockres;
2450 struct ocfs2_orphan_scan_lvb *lvb;
2453 if (ocfs2_is_hard_readonly(osb))
2456 if (ocfs2_mount_local(osb))
2459 lockres = &osb->osb_orphan_scan.os_lockres;
2460 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2464 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2465 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
2466 lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
2467 *seqno = be32_to_cpu(lvb->lvb_os_seqno);
2468 else
2469 *seqno = osb->osb_orphan_scan.os_seqno + 1;
2474 void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
2476 struct ocfs2_lock_res *lockres;
2477 struct ocfs2_orphan_scan_lvb *lvb;
2479 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
2480 lockres = &osb->osb_orphan_scan.os_lockres;
2481 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2482 lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
2483 lvb->lvb_os_seqno = cpu_to_be32(seqno);
2484 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
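/*
 * A minimal sketch (hypothetical) of the sequence handshake the two
 * functions above implement: the LVB publishes the last scan sequence,
 * and the unlocker writes back the value the next node should compare
 * against.
 */
static void example_orphan_scan_pass(struct ocfs2_super *osb)
{
	u32 seqno = 0;

	if (ocfs2_orphan_scan_lock(osb, &seqno) < 0)
		return;

	/* ... scan only if seqno still matches our cached sequence ... */

	ocfs2_orphan_scan_unlock(osb, seqno);
}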
2488 int ocfs2_super_lock(struct ocfs2_super *osb,
2492 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2493 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2497 if (ocfs2_is_hard_readonly(osb))
2500 if (ocfs2_mount_local(osb))
2503 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
2509 /* The super block lock path is really in the best position to
2510 * know when resources covered by the lock need to be
2511 * refreshed, so we do it here. Of course, making sense of
2512 * everything is up to the caller :) */
2513 status = ocfs2_should_refresh_lock_res(lockres);
2519 status = ocfs2_refresh_slot_info(osb);
2521 ocfs2_complete_lock_res_refresh(lockres, status);
2525 ocfs2_track_lock_refresh(lockres);
2532 void ocfs2_super_unlock(struct ocfs2_super *osb,
2535 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2536 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2538 if (!ocfs2_mount_local(osb))
2539 ocfs2_cluster_unlock(osb, lockres, level);
2542 int ocfs2_rename_lock(struct ocfs2_super *osb)
2545 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2547 if (ocfs2_is_hard_readonly(osb))
2550 if (ocfs2_mount_local(osb))
2553 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2560 void ocfs2_rename_unlock(struct ocfs2_super *osb)
2562 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2564 if (!ocfs2_mount_local(osb))
2565 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2568 int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
2571 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2573 if (ocfs2_is_hard_readonly(osb))
2576 if (ocfs2_mount_local(osb))
2579 status = ocfs2_cluster_lock(osb, lockres, ex ? DLM_LOCK_EX : DLM_LOCK_PR,
2582 mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
2587 void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
2589 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2591 if (!ocfs2_mount_local(osb))
2592 ocfs2_cluster_unlock(osb, lockres,
2593 ex ? DLM_LOCK_EX : DLM_LOCK_PR);
2596 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2599 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2600 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2601 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2605 if (ocfs2_is_hard_readonly(osb))
2608 if (ocfs2_mount_local(osb))
2611 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2618 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2620 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2621 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2622 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2624 if (!ocfs2_mount_local(osb))
2625 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2628 /* Reference counting of the dlm debug structure. We want this because
2629 * open references on the debug inodes can live on after a mount, so
2630 * we can't rely on the ocfs2_super to always exist. */
2631 static void ocfs2_dlm_debug_free(struct kref *kref)
2633 struct ocfs2_dlm_debug *dlm_debug;
2635 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
2640 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
2643 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
2646 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
2648 kref_get(&debug->d_refcnt);
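/*
 * A minimal sketch (hypothetical) of the pairing rule implied above:
 * every ocfs2_get_dlm_debug() is balanced by an ocfs2_put_dlm_debug(),
 * which is what lets the debug structure outlive its ocfs2_super while
 * debugfs files remain open.
 */
static void example_debug_ref_cycle(struct ocfs2_dlm_debug *dlm_debug)
{
	ocfs2_get_dlm_debug(dlm_debug);		/* e.g. at file open */
	/* ... the open debugfs file may outlive the mount here ... */
	ocfs2_put_dlm_debug(dlm_debug);		/* e.g. at file release */
}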
2651 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
2653 struct ocfs2_dlm_debug *dlm_debug;
2655 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
2657 mlog_errno(-ENOMEM);
2661 kref_init(&dlm_debug->d_refcnt);
2662 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
2663 dlm_debug->d_locking_state = NULL;
2668 /* Access to this is arbitrated for us via seq_file->sem. */
2669 struct ocfs2_dlm_seq_priv {
2670 struct ocfs2_dlm_debug *p_dlm_debug;
2671 struct ocfs2_lock_res p_iter_res;
2672 struct ocfs2_lock_res p_tmp_res;
2675 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
2676 struct ocfs2_dlm_seq_priv *priv)
2678 struct ocfs2_lock_res *iter, *ret = NULL;
2679 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
2681 assert_spin_locked(&ocfs2_dlm_tracking_lock);
2683 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
2684 /* discover the head of the list */
2685 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
2686 mlog(0, "End of list found, %p\n", ret);
2690 /* We track our "dummy" iteration lockres' by a NULL
2691 * l_ops field. */
2692 if (iter->l_ops != NULL) {
2701 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2703 struct ocfs2_dlm_seq_priv *priv = m->private;
2704 struct ocfs2_lock_res *iter;
2706 spin_lock(&ocfs2_dlm_tracking_lock);
2707 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2709 /* Since lockres' have the lifetime of their container
2710 * (which can be inodes, ocfs2_supers, etc) we want to
2711 * copy this out to a temporary lockres while still
2712 * under the spinlock. Obviously after this we can't
2713 * trust any pointers on the copy returned, but that's
2714 * ok as the information we want isn't typically held
2715 * in them. */
2716 priv->p_tmp_res = *iter;
2717 iter = &priv->p_tmp_res;
2719 spin_unlock(&ocfs2_dlm_tracking_lock);
2724 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2728 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2730 struct ocfs2_dlm_seq_priv *priv = m->private;
2731 struct ocfs2_lock_res *iter = v;
2732 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
2734 spin_lock(&ocfs2_dlm_tracking_lock);
2735 iter = ocfs2_dlm_next_res(iter, priv);
2736 list_del_init(&dummy->l_debug_list);
2738 list_add(&dummy->l_debug_list, &iter->l_debug_list);
2739 priv->p_tmp_res = *iter;
2740 iter = &priv->p_tmp_res;
2742 spin_unlock(&ocfs2_dlm_tracking_lock);
2747 /* So that debugfs.ocfs2 can determine which format is being used */
2748 #define OCFS2_DLM_DEBUG_STR_VERSION 2
2749 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2753 struct ocfs2_lock_res *lockres = v;
2758 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
2760 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
2761 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
2763 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2765 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
2767 seq_printf(m, "%d\t"
2778 lockres->l_unlock_action,
2779 lockres->l_ro_holders,
2780 lockres->l_ex_holders,
2781 lockres->l_requested,
2782 lockres->l_blocking);
2784 /* Dump the raw LVB */
2785 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2786 for(i = 0; i < DLM_LVB_LEN; i++)
2787 seq_printf(m, "0x%x\t", lvb[i]);
2789 #ifdef CONFIG_OCFS2_FS_STATS
2790 # define lock_num_prmode(_l) (_l)->l_lock_num_prmode
2791 # define lock_num_exmode(_l) (_l)->l_lock_num_exmode
2792 # define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed
2793 # define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed
2794 # define lock_total_prmode(_l) (_l)->l_lock_total_prmode
2795 # define lock_total_exmode(_l) (_l)->l_lock_total_exmode
2796 # define lock_max_prmode(_l) (_l)->l_lock_max_prmode
2797 # define lock_max_exmode(_l) (_l)->l_lock_max_exmode
2798 # define lock_refresh(_l) (_l)->l_lock_refresh
2800 # define lock_num_prmode(_l) (0ULL)
2801 # define lock_num_exmode(_l) (0ULL)
2802 # define lock_num_prmode_failed(_l) (0)
2803 # define lock_num_exmode_failed(_l) (0)
2804 # define lock_total_prmode(_l) (0ULL)
2805 # define lock_total_exmode(_l) (0ULL)
2806 # define lock_max_prmode(_l) (0)
2807 # define lock_max_exmode(_l) (0)
2808 # define lock_refresh(_l) (0)
2810 /* The following seq_printf was added in version 2 of this output */
2811 seq_printf(m, "%llu\t"
2820 lock_num_prmode(lockres),
2821 lock_num_exmode(lockres),
2822 lock_num_prmode_failed(lockres),
2823 lock_num_exmode_failed(lockres),
2824 lock_total_prmode(lockres),
2825 lock_total_exmode(lockres),
2826 lock_max_prmode(lockres),
2827 lock_max_exmode(lockres),
2828 lock_refresh(lockres));
2831 seq_printf(m, "\n");
2835 static const struct seq_operations ocfs2_dlm_seq_ops = {
2836 .start = ocfs2_dlm_seq_start,
2837 .stop = ocfs2_dlm_seq_stop,
2838 .next = ocfs2_dlm_seq_next,
2839 .show = ocfs2_dlm_seq_show,
2842 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2844 struct seq_file *seq = (struct seq_file *) file->private_data;
2845 struct ocfs2_dlm_seq_priv *priv = seq->private;
2846 struct ocfs2_lock_res *res = &priv->p_iter_res;
2848 ocfs2_remove_lockres_tracking(res);
2849 ocfs2_put_dlm_debug(priv->p_dlm_debug);
2850 return seq_release_private(inode, file);
2853 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2856 struct ocfs2_dlm_seq_priv *priv;
2857 struct seq_file *seq;
2858 struct ocfs2_super *osb;
2860 priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
2866 osb = inode->i_private;
2867 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2868 priv->p_dlm_debug = osb->osb_dlm_debug;
2869 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2871 ret = seq_open(file, &ocfs2_dlm_seq_ops);
2878 seq = (struct seq_file *) file->private_data;
2879 seq->private = priv;
2881 ocfs2_add_lockres_tracking(&priv->p_iter_res,
2888 static const struct file_operations ocfs2_dlm_debug_fops = {
2889 .open = ocfs2_dlm_debug_open,
2890 .release = ocfs2_dlm_debug_release,
2892 .llseek = seq_lseek,
2895 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2898 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2900 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
2902 osb->osb_debug_root,
2904 &ocfs2_dlm_debug_fops);
2905 if (!dlm_debug->d_locking_state) {
2908 "Unable to create locking state debugfs file.\n");
2912 ocfs2_get_dlm_debug(dlm_debug);
2917 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2919 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2922 debugfs_remove(dlm_debug->d_locking_state);
2923 ocfs2_put_dlm_debug(dlm_debug);
2927 int ocfs2_dlm_init(struct ocfs2_super *osb)
2930 struct ocfs2_cluster_connection *conn = NULL;
2934 if (ocfs2_mount_local(osb)) {
2939 status = ocfs2_dlm_init_debug(osb);
2945 /* launch downconvert thread */
2946 osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
2947 if (IS_ERR(osb->dc_task)) {
2948 status = PTR_ERR(osb->dc_task);
2949 osb->dc_task = NULL;
2954 /* for now, uuid == domain */
2955 status = ocfs2_cluster_connect(osb->osb_cluster_stack,
2957 strlen(osb->uuid_str),
2958 ocfs2_do_node_down, osb,
2965 status = ocfs2_cluster_this_node(&osb->node_num);
2969 "could not find this host's node number\n");
2970 ocfs2_cluster_disconnect(conn, 0);
2975 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
2976 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
2977 ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
2978 ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
2985 ocfs2_dlm_shutdown_debug(osb);
2987 kthread_stop(osb->dc_task);
2994 void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
2999 ocfs2_drop_osb_locks(osb);
3002 * Now that we have dropped all locks and ocfs2_dismount_volume()
3003 * has disabled recovery, the DLM won't be talking to us. It's
3004 * safe to tear things down before disconnecting the cluster.
3008 kthread_stop(osb->dc_task);
3009 osb->dc_task = NULL;
3012 ocfs2_lock_res_free(&osb->osb_super_lockres);
3013 ocfs2_lock_res_free(&osb->osb_rename_lockres);
3014 ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
3015 ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
3017 ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
3020 ocfs2_dlm_shutdown_debug(osb);
3025 static void ocfs2_unlock_ast(void *opaque, int error)
3027 struct ocfs2_lock_res *lockres = opaque;
3028 unsigned long flags;
3032 mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
3033 lockres->l_unlock_action);
3035 spin_lock_irqsave(&lockres->l_lock, flags);
3037 mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
3038 "unlock_action %d\n", error, lockres->l_name,
3039 lockres->l_unlock_action);
3040 spin_unlock_irqrestore(&lockres->l_lock, flags);
3044 switch(lockres->l_unlock_action) {
3045 case OCFS2_UNLOCK_CANCEL_CONVERT:
3046 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
3047 lockres->l_action = OCFS2_AST_INVALID;
3048 /* Downconvert thread may have requeued this lock, we
3049 * need to wake it. */
3050 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
3051 ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
3053 case OCFS2_UNLOCK_DROP_LOCK:
3054 lockres->l_level = DLM_LOCK_IV;
3060 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
3061 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
3062 wake_up(&lockres->l_event);
3063 spin_unlock_irqrestore(&lockres->l_lock, flags);
3068 static int ocfs2_drop_lock(struct ocfs2_super *osb,
3069 struct ocfs2_lock_res *lockres)
3072 unsigned long flags;
3075 /* We didn't get anywhere near actually using this lockres. */
3076 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
3079 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
3080 lkm_flags |= DLM_LKF_VALBLK;
3082 spin_lock_irqsave(&lockres->l_lock, flags);
3084 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
3085 "lockres %s, flags 0x%lx\n",
3086 lockres->l_name, lockres->l_flags);
3088 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
3089 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
3090 "%u, unlock_action = %u\n",
3091 lockres->l_name, lockres->l_flags, lockres->l_action,
3092 lockres->l_unlock_action);
3094 spin_unlock_irqrestore(&lockres->l_lock, flags);
3096 /* XXX: Today we just wait on any busy
3097 * locks... Perhaps we need to cancel converts in the
3098 * future? */
3099 ocfs2_wait_on_busy_lock(lockres);
3101 spin_lock_irqsave(&lockres->l_lock, flags);
3104 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3105 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
3106 lockres->l_level == DLM_LOCK_EX &&
3107 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3108 lockres->l_ops->set_lvb(lockres);
3111 if (lockres->l_flags & OCFS2_LOCK_BUSY)
3112 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
3114 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
3115 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
3117 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
3118 spin_unlock_irqrestore(&lockres->l_lock, flags);
3122 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
3124 /* make sure we never get here while waiting for an ast to
3125 * fire. */
3126 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
3128 /* is this necessary? */
3129 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3130 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
3131 spin_unlock_irqrestore(&lockres->l_lock, flags);
3133 mlog(0, "lock %s\n", lockres->l_name);
3135 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags,
3138 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3139 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
3140 ocfs2_dlm_dump_lksb(&lockres->l_lksb);
3143 mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
3146 ocfs2_wait_on_busy_lock(lockres);
3152 /* Mark the lockres as being dropped. It will no longer be
3153 * queued if blocking, but we still may have to wait on it
3154 * being dequeued from the downconvert thread before we can consider
3155 * it safe to drop.
3156 *
3157 * You can *not* attempt to call cluster_lock on this lockres anymore. */
3158 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
3161 struct ocfs2_mask_waiter mw;
3162 unsigned long flags;
3164 ocfs2_init_mask_waiter(&mw);
3166 spin_lock_irqsave(&lockres->l_lock, flags);
3167 lockres->l_flags |= OCFS2_LOCK_FREEING;
3168 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
3169 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
3170 spin_unlock_irqrestore(&lockres->l_lock, flags);
3172 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
3174 status = ocfs2_wait_for_mask(&mw);
3178 spin_lock_irqsave(&lockres->l_lock, flags);
3180 spin_unlock_irqrestore(&lockres->l_lock, flags);
3183 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
3184 struct ocfs2_lock_res *lockres)
3188 ocfs2_mark_lockres_freeing(lockres);
3189 ret = ocfs2_drop_lock(osb, lockres);
3194 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
3196 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
3197 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
3198 ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
3199 ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
3202 int ocfs2_drop_inode_locks(struct inode *inode)
3208 /* No need to call ocfs2_mark_lockres_freeing here -
3209 * ocfs2_clear_inode has done it for us. */
3211 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3212 &OCFS2_I(inode)->ip_open_lockres);
3218 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3219 &OCFS2_I(inode)->ip_inode_lockres);
3222 if (err < 0 && !status)
3225 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3226 &OCFS2_I(inode)->ip_rw_lockres);
3229 if (err < 0 && !status)
3236 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
3239 assert_spin_locked(&lockres->l_lock);
3241 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
3243 if (lockres->l_level <= new_level) {
3244 mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n",
3245 lockres->l_level, new_level);
3249 mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
3250 lockres->l_name, new_level, lockres->l_blocking);
3252 lockres->l_action = OCFS2_AST_DOWNCONVERT;
3253 lockres->l_requested = new_level;
3254 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3255 return lockres_set_pending(lockres);
3258 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3259 struct ocfs2_lock_res *lockres,
3262 unsigned int generation)
3265 u32 dlm_flags = DLM_LKF_CONVERT;
3270 dlm_flags |= DLM_LKF_VALBLK;
3272 ret = ocfs2_dlm_lock(osb->cconn,
3277 OCFS2_LOCK_ID_MAX_LEN - 1,
3279 lockres_clear_pending(lockres, generation, osb);
3281 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
3282 ocfs2_recover_from_dlm_error(lockres, 1);
3292 /* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
3293 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3294 struct ocfs2_lock_res *lockres)
3296 assert_spin_locked(&lockres->l_lock);
3299 mlog(0, "lock %s\n", lockres->l_name);
3301 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
3302 /* If we're already trying to cancel a lock conversion
3303 * then just drop the spinlock and allow the caller to
3304 * requeue this lock. */
3306 mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
3310 /* were we in a convert when we got the bast fire? */
3311 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
3312 lockres->l_action != OCFS2_AST_DOWNCONVERT);
3313 /* set things up for the unlockast to know to just
3314 * clear out the ast_action and unset busy, etc. */
3315 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
3317 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
3318 "lock %s, invalid flags: 0x%lx\n",
3319 lockres->l_name, lockres->l_flags);
3324 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3325 struct ocfs2_lock_res *lockres)
3330 mlog(0, "lock %s\n", lockres->l_name);
3332 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
3333 DLM_LKF_CANCEL, lockres);
3335 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3336 ocfs2_recover_from_dlm_error(lockres, 0);
3339 mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name);
3345 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3346 struct ocfs2_lock_res *lockres,
3347 struct ocfs2_unblock_ctl *ctl)
3349 unsigned long flags;
3358 spin_lock_irqsave(&lockres->l_lock, flags);
3360 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
3363 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
3365 * This is a *big* race. The OCFS2_LOCK_PENDING flag
3366 * exists entirely for one reason - another thread has set
3367 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
3369 * If we do ocfs2_cancel_convert() before the other thread
3370 * calls dlm_lock(), our cancel will do nothing. We will
3371 * get no ast, and we will have no way of knowing the
3372 * cancel failed. Meanwhile, the other thread will call
3373 * into dlm_lock() and wait...forever.
3375 * Why forever? Because another node has asked for the
3376 * lock first; that's why we're here in unblock_lock().
3378 * The solution is OCFS2_LOCK_PENDING. When PENDING is
3379 * set, we just requeue the unblock. Only when the other
3380 * thread has called dlm_lock() and cleared PENDING will
3381 * we then cancel their request.
3383 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
3384 * at the same time they set OCFS2_LOCK_BUSY. They must
3385 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
3387 if (lockres->l_flags & OCFS2_LOCK_PENDING)
3391 ret = ocfs2_prepare_cancel_convert(osb, lockres);
3392 spin_unlock_irqrestore(&lockres->l_lock, flags);
3394 ret = ocfs2_cancel_convert(osb, lockres);
3401 /* if we're blocking an exclusive and we have *any* holders,
3402 * then requeue. */
3403 if ((lockres->l_blocking == DLM_LOCK_EX)
3404 && (lockres->l_ex_holders || lockres->l_ro_holders))
3407 /* If it's a PR we're blocking, then only
3408 * requeue if we've got any EX holders */
3409 if (lockres->l_blocking == DLM_LOCK_PR &&
3410 lockres->l_ex_holders)
3414 * Can we get a lock in this state if the holder counts are
3415 * zero? The meta data unblock code used to check this.
3417 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
3418 && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
3421 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
3423 if (lockres->l_ops->check_downconvert
3424 && !lockres->l_ops->check_downconvert(lockres, new_level))
3427 /* If we get here, then we know that there are no more
3428 * incompatible holders (and anyone asking for an incompatible
3429 * lock is blocked). We can now downconvert the lock */
3430 if (!lockres->l_ops->downconvert_worker)
3433 /* Some lockres types want to do a bit of work before
3434 * downconverting a lock. Allow that here. The worker function
3435 * may sleep, so we save off a copy of what we're blocking as
3436 * it may change while we're not holding the spin lock. */
3437 blocking = lockres->l_blocking;
3438 spin_unlock_irqrestore(&lockres->l_lock, flags);
3440 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
3442 if (ctl->unblock_action == UNBLOCK_STOP_POST)
3445 spin_lock_irqsave(&lockres->l_lock, flags);
3446 if (blocking != lockres->l_blocking) {
3447 /* If this changed underneath us, then we can't drop
3448 * it just yet. */
3455 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3456 if (lockres->l_level == DLM_LOCK_EX)
3460 * We only set the lvb if the lock has been fully
3461 * refreshed - otherwise we risk setting stale
3462 * data. Otherwise, there's no need to actually clear
3463 * out the lvb here as its value is still valid.
3465 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3466 lockres->l_ops->set_lvb(lockres);
3469 gen = ocfs2_prepare_downconvert(lockres, new_level);
3470 spin_unlock_irqrestore(&lockres->l_lock, flags);
3471 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
3479 spin_unlock_irqrestore(&lockres->l_lock, flags);
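/*
 * A minimal sketch (hypothetical) of the caller-side PENDING protocol
 * the comment above describes: BUSY and PENDING are set together under
 * the spinlock before calling into the DLM, and PENDING is cleared only
 * after ocfs2_dlm_lock() returns, so ocfs2_unblock_lock() knows when a
 * cancel convert can safely be issued.
 */
static void example_pending_protocol(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres)
{
	unsigned long flags;
	unsigned int gen;

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	gen = lockres_set_pending(lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	/* ... call ocfs2_dlm_lock() here ... */

	lockres_clear_pending(lockres, gen, osb);
}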
3486 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
3489 struct inode *inode;
3490 struct address_space *mapping;
3492 inode = ocfs2_lock_res_inode(lockres);
3493 mapping = inode->i_mapping;
3495 if (!S_ISREG(inode->i_mode))
3499 * We need this before the filemap_fdatawrite() so that it can
3500 * transfer the dirty bit from the PTE to the
3501 * page. Unfortunately this means that even for EX->PR
3502 * downconverts, we'll lose our mappings and have to build
3503 * them up again.
3504 */
3505 unmap_mapping_range(mapping, 0, 0, 0);
3507 if (filemap_fdatawrite(mapping)) {
3508 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
3509 (unsigned long long)OCFS2_I(inode)->ip_blkno);
3511 sync_mapping_buffers(mapping);
3512 if (blocking == DLM_LOCK_EX) {
3513 truncate_inode_pages(mapping, 0);
3515 /* We only need to wait on the I/O if we're not also
3516 * truncating pages because truncate_inode_pages waits
3517 * for us above. We don't truncate pages if we're
3518 * blocking anything < EXMODE because we want to keep
3519 * them around in that case. */
3520 filemap_fdatawait(mapping);
3524 return UNBLOCK_CONTINUE;
3527 static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
3528 struct ocfs2_lock_res *lockres,
3531 int checkpointed = ocfs2_ci_fully_checkpointed(ci);
3533 BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
3534 BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
3539 ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
3543 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3546 struct inode *inode = ocfs2_lock_res_inode(lockres);
3548 return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
3551 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
3553 struct inode *inode = ocfs2_lock_res_inode(lockres);
3555 __ocfs2_stuff_meta_lvb(inode);
3559 * Does the final reference drop on our dentry lock. Right now this
3560 * happens in the downconvert thread, but we could choose to simplify the
3561 * dlmglue API and push these off to the ocfs2_wq in the future.
3563 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3564 struct ocfs2_lock_res *lockres)
3566 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3567 ocfs2_dentry_lock_put(osb, dl);
3571 * d_delete() matching dentries before the lock downconvert.
3573 * At this point, any process waiting to destroy the
3574 * dentry_lock due to last ref count is stopped by the
3575 * OCFS2_LOCK_QUEUED flag.
3577 * We have two potential problems
3579 * 1) If we do the last reference drop on our dentry_lock (via dput)
3580 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
3581 * the downconvert to finish. Instead we take an elevated
3582 * reference and push the drop until after we've completed our
3583 * unblock processing.
3585 * 2) There might be another process with a final reference,
3586 * waiting on us to finish processing. If this is the case, we
3587 * detect it and exit out - there are no more dentries anyway.
3589 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
3592 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3593 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
3594 struct dentry *dentry;
3595 unsigned long flags;
3599 * This node is blocking another node from getting a read
3600 * lock. This happens when we've renamed within a
3601 * directory. We've forced the other nodes to d_delete(), but
3602 * we never actually dropped our lock because it's still
3603 * valid. The downconvert code will retain a PR for this node,
3604 * so there's no further work to do.
3606 if (blocking == DLM_LOCK_PR)
3607 return UNBLOCK_CONTINUE;
3610 * Mark this inode as potentially orphaned. The code in
3611 * ocfs2_delete_inode() will figure out whether it actually
3612 * needs to be freed or not.
3614 spin_lock(&oi->ip_lock);
3615 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
3616 spin_unlock(&oi->ip_lock);
3619 * Yuck. We need to make sure however that the check of
3620 * OCFS2_LOCK_FREEING and the extra reference are atomic with
3621 * respect to a reference decrement or the setting of that
3622 * flag.
3623 */
3624 spin_lock_irqsave(&lockres->l_lock, flags);
3625 spin_lock(&dentry_attach_lock);
3626 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
3631 spin_unlock(&dentry_attach_lock);
3632 spin_unlock_irqrestore(&lockres->l_lock, flags);
3634 mlog(0, "extra_ref = %d\n", extra_ref);
3637 * We have a process waiting on us in ocfs2_dentry_iput(),
3638 * which means we can't have any more outstanding
3639 * aliases. There's no need to do any more work.
3642 return UNBLOCK_CONTINUE;
3644 spin_lock(&dentry_attach_lock);
3646 dentry = ocfs2_find_local_alias(dl->dl_inode,
3647 dl->dl_parent_blkno, 1);
3650 spin_unlock(&dentry_attach_lock);
3652 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
3653 dentry->d_name.name);
3656 * The following dcache calls may do an
3657 * iput(). Normally we don't want that from the
3658 * downconverting thread, but in this case it's ok
3659 * because the requesting node already has an
3660 * exclusive lock on the inode, so it can't be queued
3661 * for a downconvert.
3666 spin_lock(&dentry_attach_lock);
3668 spin_unlock(&dentry_attach_lock);
3671 * If we are the last holder of this dentry lock, there is no
3672 * reason to downconvert so skip straight to the unlock.
3674 if (dl->dl_count == 1)
3675 return UNBLOCK_STOP_POST;
3677 return UNBLOCK_CONTINUE_POST;
3680 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
3683 struct ocfs2_refcount_tree *tree =
3684 ocfs2_lock_res_refcount_tree(lockres);
3686 return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
3689 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
3692 struct ocfs2_refcount_tree *tree =
3693 ocfs2_lock_res_refcount_tree(lockres);
3695 ocfs2_metadata_cache_purge(&tree->rf_ci);
3697 return UNBLOCK_CONTINUE;
3700 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
3702 struct ocfs2_qinfo_lvb *lvb;
3703 struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
3704 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3705 oinfo->dqi_gi.dqi_type);
3709 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3710 lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
3711 lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
3712 lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
3713 lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
3714 lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
3715 lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
3716 lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
3721 void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3723 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3724 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3725 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3728 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
3729 ocfs2_cluster_unlock(osb, lockres, level);
3733 static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
3735 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3736 oinfo->dqi_gi.dqi_type);
3737 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3738 struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3739 struct buffer_head *bh = NULL;
3740 struct ocfs2_global_disk_dqinfo *gdinfo;
3743 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
3744 lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
3745 info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
3746 info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
3747 oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
3748 oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
3749 oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
3750 oinfo->dqi_gi.dqi_free_entry =
3751 be32_to_cpu(lvb->lvb_free_entry);
3752 } else {
3753 status = ocfs2_read_quota_block(oinfo->dqi_gqinode, 0, &bh);
3758 gdinfo = (struct ocfs2_global_disk_dqinfo *)
3759 (bh->b_data + OCFS2_GLOBAL_INFO_OFF);
3760 info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
3761 info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
3762 oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
3763 oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
3764 oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
3765 oinfo->dqi_gi.dqi_free_entry =
3766 le32_to_cpu(gdinfo->dqi_free_entry);
3768 ocfs2_track_lock_refresh(lockres);
3775 /* Lock quota info; this function expects at least a shared lock on the quota file
3776 * so that we can safely refresh quota info from disk. */
3777 int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3779 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3780 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3781 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3786 /* On RO devices, locking really isn't needed... */
3787 if (ocfs2_is_hard_readonly(osb)) {
3792 if (ocfs2_mount_local(osb))
3795 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3800 if (!ocfs2_should_refresh_lock_res(lockres))
3802 /* OK, we have the lock but we need to refresh the quota info */
3803 status = ocfs2_refresh_qinfo(oinfo);
3805 ocfs2_qinfo_unlock(oinfo, ex);
3806 ocfs2_complete_lock_res_refresh(lockres, status);
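/*
 * A minimal caller sketch (hypothetical) of the contract above: the
 * caller already holds at least a shared lock on the quota file, takes
 * the global quota info lock, and only then reads the refreshed fields.
 */
static int example_qinfo_user(struct ocfs2_mem_dqinfo *oinfo)
{
	int status;

	status = ocfs2_qinfo_lock(oinfo, 0);	/* PR is enough for reads */
	if (status < 0)
		return status;

	/* ... read the freshly refreshed dqi fields here ... */

	ocfs2_qinfo_unlock(oinfo, 0);
	return 0;
}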
3812 int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
3815 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3816 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
3817 struct ocfs2_super *osb = lockres->l_priv;
3820 if (ocfs2_is_hard_readonly(osb))
3823 if (ocfs2_mount_local(osb))
3826 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3833 void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
3835 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3836 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
3837 struct ocfs2_super *osb = lockres->l_priv;
3839 if (!ocfs2_mount_local(osb))
3840 ocfs2_cluster_unlock(osb, lockres, level);
3844 * This is the filesystem locking protocol. It provides the lock handling
3845 * hooks for the underlying DLM. It has a maximum version number.
3846 * The version number allows interoperability with systems running at
3847 * the same major number and an equal or smaller minor number.
3849 * Whenever the filesystem does new things with locks (adds or removes a
3850 * lock, orders them differently, does different things underneath a lock),
3851 * the version must be changed. The protocol is negotiated when joining
3852 * the dlm domain. A node may join the domain if its major version is
3853 * identical to all other nodes and its minor version is greater than
3854 * or equal to all other nodes. When its minor version is greater than
3855 * the other nodes, it will run at the minor version specified by the
3856 * other nodes.
3857 *
3858 * If a locking change is made that will not be compatible with older
3859 * versions, the major number must be increased and the minor version set
3860 * to zero. If a change merely adds a behavior that can be disabled when
3861 * speaking to older versions, the minor version must be increased. If a
3862 * change adds a fully backwards compatible change (eg, LVB changes that
3863 * are just ignored by older versions), the version does not need to be
3864 * updated.
3865 */
3866 static struct ocfs2_locking_protocol lproto = {
3868 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
3869 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
3871 .lp_lock_ast = ocfs2_locking_ast,
3872 .lp_blocking_ast = ocfs2_blocking_ast,
3873 .lp_unlock_ast = ocfs2_unlock_ast,
3876 void ocfs2_set_locking_protocol(void)
3878 ocfs2_stack_glue_set_locking_protocol(&lproto);
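/*
 * A small illustrative helper (hypothetical, not part of the protocol
 * code) encoding the join rule described above: major versions must
 * match exactly, and a joiner's minor must be at least the domain's,
 * after which the joiner runs at the domain's minor.
 */
static int example_may_join(u8 my_major, u8 my_minor,
			    u8 domain_major, u8 domain_minor)
{
	if (my_major != domain_major)
		return 0;	/* incompatible major: cannot join */
	if (my_minor < domain_minor)
		return 0;	/* too old to speak the domain's minor */
	return 1;		/* joins and runs at domain_minor */
}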
3882 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3883 struct ocfs2_lock_res *lockres)
3886 struct ocfs2_unblock_ctl ctl = {0, 0,};
3887 unsigned long flags;
3889 /* Our reference to the lockres in this function can be
3890 * considered valid until we remove the OCFS2_LOCK_QUEUED
3891 * flag. */
3896 BUG_ON(!lockres->l_ops);
3898 mlog(0, "lockres %s blocked.\n", lockres->l_name);
3900 /* Detect whether a lock has been marked as going away while
3901 * the downconvert thread was processing other things. A lock can
3902 * still be marked with OCFS2_LOCK_FREEING after this check,
3903 * but short circuiting here will still save us some
3904 * performance. */
3905 spin_lock_irqsave(&lockres->l_lock, flags);
3906 if (lockres->l_flags & OCFS2_LOCK_FREEING)
3908 spin_unlock_irqrestore(&lockres->l_lock, flags);
3910 status = ocfs2_unblock_lock(osb, lockres, &ctl);
3914 spin_lock_irqsave(&lockres->l_lock, flags);
3916 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
3917 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
3919 ocfs2_schedule_blocked_lock(osb, lockres);
3921 mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
3922 ctl.requeue ? "yes" : "no");
3923 spin_unlock_irqrestore(&lockres->l_lock, flags);
3925 if (ctl.unblock_action != UNBLOCK_CONTINUE
3926 && lockres->l_ops->post_unlock)
3927 lockres->l_ops->post_unlock(osb, lockres);
3932 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3933 struct ocfs2_lock_res *lockres)
3937 assert_spin_locked(&lockres->l_lock);
3939 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
3940 /* Do not schedule a lock for downconvert when it's on
3941 * the way to destruction - any nodes wanting access
3942 * to the resource will get it soon. */
3943 mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
3944 lockres->l_name, lockres->l_flags);
3948 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
3950 spin_lock(&osb->dc_task_lock);
3951 if (list_empty(&lockres->l_blocked_list)) {
3952 list_add_tail(&lockres->l_blocked_list,
3953 &osb->blocked_lock_list);
3954 osb->blocked_lock_count++;
3956 spin_unlock(&osb->dc_task_lock);
3961 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3963 unsigned long processed;
3964 struct ocfs2_lock_res *lockres;
3968 spin_lock(&osb->dc_task_lock);
3969 /* grab this early so we know to try again if a state change and
3970 * wake happen part-way through our work */
3971 osb->dc_work_sequence = osb->dc_wake_sequence;
3973 processed = osb->blocked_lock_count;
3975 BUG_ON(list_empty(&osb->blocked_lock_list));
3977 lockres = list_entry(osb->blocked_lock_list.next,
3978 struct ocfs2_lock_res, l_blocked_list);
3979 list_del_init(&lockres->l_blocked_list);
3980 osb->blocked_lock_count--;
3981 spin_unlock(&osb->dc_task_lock);
3986 ocfs2_process_blocked_lock(osb, lockres);
3988 spin_lock(&osb->dc_task_lock);
3990 spin_unlock(&osb->dc_task_lock);
3995 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
3999 spin_lock(&osb->dc_task_lock);
4000 if (list_empty(&osb->blocked_lock_list))
4003 spin_unlock(&osb->dc_task_lock);
4007 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
4009 int should_wake = 0;
4011 spin_lock(&osb->dc_task_lock);
4012 if (osb->dc_work_sequence != osb->dc_wake_sequence)
4014 spin_unlock(&osb->dc_task_lock);
4019 static int ocfs2_downconvert_thread(void *arg)
4022 struct ocfs2_super *osb = arg;
4024 /* only quit once we've been asked to stop and there is no more
4025 * work to do */
4026 while (!(kthread_should_stop() &&
4027 ocfs2_downconvert_thread_lists_empty(osb))) {
4029 wait_event_interruptible(osb->dc_event,
4030 ocfs2_downconvert_thread_should_wake(osb) ||
4031 kthread_should_stop());
4033 mlog(0, "downconvert_thread: awoken\n");
4035 ocfs2_downconvert_thread_do_work(osb);
4038 osb->dc_task = NULL;
4042 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
4044 spin_lock(&osb->dc_task_lock);
4045 /* make sure the downconvert thread gets a swipe at whatever changes
4046 * the caller may have made to the blocked lock state */
4047 osb->dc_wake_sequence++;
4048 spin_unlock(&osb->dc_task_lock);
4049 wake_up(&osb->dc_event);