1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Code which implements an OCFS2 specific interface to our DLM.
8 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
26 #include <linux/types.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
30 #include <linux/kthread.h>
31 #include <linux/pagemap.h>
32 #include <linux/debugfs.h>
33 #include <linux/seq_file.h>
34 #include <linux/time.h>
35 #include <linux/quotaops.h>
37 #define MLOG_MASK_PREFIX ML_DLM_GLUE
38 #include <cluster/masklog.h>
41 #include "ocfs2_lockingver.h"
46 #include "extent_map.h"
48 #include "heartbeat.h"
51 #include "stackglue.h"
56 #include "refcounttree.h"
58 #include "buffer_head_io.h"
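
/*
 * A mask waiter lets a task sleep until a lockres's l_flags, masked by
 * mw_mask, reach the value in mw_goal. lockres_set_flags() below walks
 * l_mask_waiters and completes any waiter whose condition is now met.
 */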
60 struct ocfs2_mask_waiter {
61 struct list_head mw_item;
63 struct completion mw_complete;
64 unsigned long mw_mask;
65 unsigned long mw_goal;
66 #ifdef CONFIG_OCFS2_FS_STATS
67 unsigned long long mw_lock_start;
71 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
72 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
73 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
74 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
77 * Return value from ->downconvert_worker functions.
79 * These control the precise actions of ocfs2_unblock_lock()
80 * and ocfs2_process_blocked_lock()
83 enum ocfs2_unblock_action {
84 UNBLOCK_CONTINUE = 0, /* Continue downconvert */
85 UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
86 * ->post_unlock callback */
87 UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
88 * ->post_unlock() callback. */
91 struct ocfs2_unblock_ctl {
93 enum ocfs2_unblock_action unblock_action;
96 /* Lockdep class keys */
97 struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
99 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
101 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
103 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
106 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
109 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
110 struct ocfs2_lock_res *lockres);
112 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);
114 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
116 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
119 #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
121 /* This aids in debugging situations where a bad LVB might be involved. */
122 static void ocfs2_dump_meta_lvb_info(u64 level,
123 const char *function,
125 struct ocfs2_lock_res *lockres)
127 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
129 mlog(level, "LVB information for %s (called from %s:%u):\n",
130 lockres->l_name, function, line);
131 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
132 lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
133 be32_to_cpu(lvb->lvb_igeneration));
134 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
135 (unsigned long long)be64_to_cpu(lvb->lvb_isize),
136 be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
137 be16_to_cpu(lvb->lvb_imode));
138 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
139 "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
140 (long long)be64_to_cpu(lvb->lvb_iatime_packed),
141 (long long)be64_to_cpu(lvb->lvb_ictime_packed),
142 (long long)be64_to_cpu(lvb->lvb_imtime_packed),
143 be32_to_cpu(lvb->lvb_iattr));
148 * OCFS2 Lock Resource Operations
150 * These fine tune the behavior of the generic dlmglue locking infrastructure.
152 * The most basic of lock types can point ->l_priv to their respective
153 * struct ocfs2_super and allow the default actions to manage things.
155 * Right now, each lock type also needs to implement an init function,
156 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
157 * should be called when the lock is no longer needed (i.e., at object destruction time).
160 struct ocfs2_lock_res_ops {
162 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
163 * this callback if ->l_priv is not an ocfs2_super pointer
165 struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
168 * Optionally called in the downconvert thread after a
169 * successful downconvert. The lockres will not be referenced
170 * after this callback is called, so it is safe to free memory, etc.
173 * The exact semantics of when this is called are controlled
174 * by ->downconvert_worker()
176 void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
179 * Allow a lock type to add checks to determine whether it is
180 * safe to downconvert a lock. Return 0 to re-queue the
181 * downconvert at a later time, nonzero to continue.
183 * For most locks, the default checks that there are no
184 * incompatible holders are sufficient.
186 * Called with the lockres spinlock held.
188 int (*check_downconvert)(struct ocfs2_lock_res *, int);
191 * Allows a lock type to populate the lock value block. This
192 * is called on downconvert, and when we drop a lock.
194 * Locks that want to use this should set LOCK_TYPE_USES_LVB
195 * in the flags field.
197 * Called with the lockres spinlock held.
199 void (*set_lvb)(struct ocfs2_lock_res *);
202 * Called from the downconvert thread when it is determined
203 * that a lock will be downconverted. This is called without
204 * any locks held so the function can do work that might
205 * schedule (syncing out data, etc).
207 * This should return any one of the ocfs2_unblock_action
208 * values, depending on what it wants the thread to do.
210 int (*downconvert_worker)(struct ocfs2_lock_res *, int);
213 * LOCK_TYPE_* flags which describe the specific requirements
214 * of a lock type. Descriptions of each individual flag follow.
220 * Some locks want to "refresh" potentially stale data when a
221 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
222 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
223 * individual lockres l_flags member from the ast function. It is
224 * expected that the locking wrapper will clear the
225 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
227 #define LOCK_TYPE_REQUIRES_REFRESH 0x1
230 * Indicate that a lock type makes use of the lock value block. The
231 * ->set_lvb lock type callback must be defined.
233 #define LOCK_TYPE_USES_LVB 0x2
235 static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
236 .get_osb = ocfs2_get_inode_osb,
240 static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
241 .get_osb = ocfs2_get_inode_osb,
242 .check_downconvert = ocfs2_check_meta_downconvert,
243 .set_lvb = ocfs2_set_meta_lvb,
244 .downconvert_worker = ocfs2_data_convert_worker,
245 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
248 static struct ocfs2_lock_res_ops ocfs2_super_lops = {
249 .flags = LOCK_TYPE_REQUIRES_REFRESH,
252 static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
256 static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
260 static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
261 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
264 static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
265 .get_osb = ocfs2_get_dentry_osb,
266 .post_unlock = ocfs2_dentry_post_unlock,
267 .downconvert_worker = ocfs2_dentry_convert_worker,
271 static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
272 .get_osb = ocfs2_get_inode_osb,
276 static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
277 .get_osb = ocfs2_get_file_osb,
281 static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
282 .set_lvb = ocfs2_set_qinfo_lvb,
283 .get_osb = ocfs2_get_qinfo_osb,
284 .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
287 static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
288 .check_downconvert = ocfs2_check_refcount_downconvert,
289 .downconvert_worker = ocfs2_refcount_convert_worker,
293 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
295 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
296 lockres->l_type == OCFS2_LOCK_TYPE_RW ||
297 lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
300 static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
302 BUG_ON(!ocfs2_is_inode_lock(lockres));
304 return (struct inode *) lockres->l_priv;
307 static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
309 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
311 return (struct ocfs2_dentry_lock *)lockres->l_priv;
314 static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
316 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);
318 return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
321 static inline struct ocfs2_refcount_tree *
322 ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
324 return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
327 static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
329 if (lockres->l_ops->get_osb)
330 return lockres->l_ops->get_osb(lockres);
332 return (struct ocfs2_super *)lockres->l_priv;
335 static int ocfs2_lock_create(struct ocfs2_super *osb,
336 struct ocfs2_lock_res *lockres,
339 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
341 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
342 struct ocfs2_lock_res *lockres,
343 int level, unsigned long caller_ip);
344 static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
345 struct ocfs2_lock_res *lockres,
348 __ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
351 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
352 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
353 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
354 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
355 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
356 struct ocfs2_lock_res *lockres);
357 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
359 #define ocfs2_log_dlm_error(_func, _err, _lockres) do { \
360 if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY) \
361 mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
362 _err, _func, _lockres->l_name); \
364 mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
365 _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \
366 (unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
368 static int ocfs2_downconvert_thread(void *arg);
369 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
370 struct ocfs2_lock_res *lockres);
371 static int ocfs2_inode_lock_update(struct inode *inode,
372 struct buffer_head **bh);
373 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
374 static inline int ocfs2_highest_compat_lock_level(int level);
375 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
377 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
378 struct ocfs2_lock_res *lockres,
381 unsigned int generation);
382 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
383 struct ocfs2_lock_res *lockres);
384 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
385 struct ocfs2_lock_res *lockres);
388 static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
397 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
399 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
400 ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
401 (long long)blkno, generation);
403 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
405 mlog(0, "built lock resource with name: %s\n", name);
410 static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
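
/*
 * Every initialized lockres is linked onto its superblock's
 * d_lockres_tracking list so the lock debugging code can walk all
 * lockres structures. The global spinlock above protects these lists.
 */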
412 static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
413 struct ocfs2_dlm_debug *dlm_debug)
415 mlog(0, "Add tracking for lockres %s\n", res->l_name);
417 spin_lock(&ocfs2_dlm_tracking_lock);
418 list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
419 spin_unlock(&ocfs2_dlm_tracking_lock);
422 static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
424 spin_lock(&ocfs2_dlm_tracking_lock);
425 if (!list_empty(&res->l_debug_list))
426 list_del_init(&res->l_debug_list);
427 spin_unlock(&ocfs2_dlm_tracking_lock);
430 #ifdef CONFIG_OCFS2_FS_STATS
431 static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
433 res->l_lock_num_prmode = 0;
434 res->l_lock_num_prmode_failed = 0;
435 res->l_lock_total_prmode = 0;
436 res->l_lock_max_prmode = 0;
437 res->l_lock_num_exmode = 0;
438 res->l_lock_num_exmode_failed = 0;
439 res->l_lock_total_exmode = 0;
440 res->l_lock_max_exmode = 0;
441 res->l_lock_refresh = 0;
444 static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
445 struct ocfs2_mask_waiter *mw, int ret)
447 unsigned long long *num, *sum;
448 unsigned int *max, *failed;
449 struct timespec ts = current_kernel_time();
450 unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start;
452 if (level == LKM_PRMODE) {
453 num = &res->l_lock_num_prmode;
454 sum = &res->l_lock_total_prmode;
455 max = &res->l_lock_max_prmode;
456 failed = &res->l_lock_num_prmode_failed;
457 } else if (level == LKM_EXMODE) {
458 num = &res->l_lock_num_exmode;
459 sum = &res->l_lock_total_exmode;
460 max = &res->l_lock_max_exmode;
461 failed = &res->l_lock_num_exmode_failed;
473 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
475 lockres->l_lock_refresh++;
478 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
480 struct timespec ts = current_kernel_time();
481 mw->mw_lock_start = timespec_to_ns(&ts);
484 static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
487 static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
488 int level, struct ocfs2_mask_waiter *mw, int ret)
491 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
494 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
499 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
500 struct ocfs2_lock_res *res,
501 enum ocfs2_lock_type type,
502 struct ocfs2_lock_res_ops *ops,
509 res->l_level = DLM_LOCK_IV;
510 res->l_requested = DLM_LOCK_IV;
511 res->l_blocking = DLM_LOCK_IV;
512 res->l_action = OCFS2_AST_INVALID;
513 res->l_unlock_action = OCFS2_UNLOCK_INVALID;
515 res->l_flags = OCFS2_LOCK_INITIALIZED;
517 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
519 ocfs2_init_lock_stats(res);
520 #ifdef CONFIG_DEBUG_LOCK_ALLOC
521 if (type != OCFS2_LOCK_TYPE_OPEN)
522 lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
523 &lockdep_keys[type], 0);
525 res->l_lockdep_map.key = NULL;
529 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
531 /* This also clears out the lock status block */
532 memset(res, 0, sizeof(struct ocfs2_lock_res));
533 spin_lock_init(&res->l_lock);
534 init_waitqueue_head(&res->l_event);
535 INIT_LIST_HEAD(&res->l_blocked_list);
536 INIT_LIST_HEAD(&res->l_mask_waiters);
539 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
540 enum ocfs2_lock_type type,
541 unsigned int generation,
544 struct ocfs2_lock_res_ops *ops;
547 case OCFS2_LOCK_TYPE_RW:
548 ops = &ocfs2_inode_rw_lops;
550 case OCFS2_LOCK_TYPE_META:
551 ops = &ocfs2_inode_inode_lops;
553 case OCFS2_LOCK_TYPE_OPEN:
554 ops = &ocfs2_inode_open_lops;
557 mlog_bug_on_msg(1, "type: %d\n", type);
558 ops = NULL; /* thanks, gcc */
562 ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
563 generation, res->l_name);
564 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
567 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
569 struct inode *inode = ocfs2_lock_res_inode(lockres);
571 return OCFS2_SB(inode->i_sb);
574 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
576 struct ocfs2_mem_dqinfo *info = lockres->l_priv;
578 return OCFS2_SB(info->dqi_gi.dqi_sb);
581 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
583 struct ocfs2_file_private *fp = lockres->l_priv;
585 return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
588 static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
590 __be64 inode_blkno_be;
592 memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
595 return be64_to_cpu(inode_blkno_be);
598 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
600 struct ocfs2_dentry_lock *dl = lockres->l_priv;
602 return OCFS2_SB(dl->dl_inode->i_sb);
605 void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
606 u64 parent, struct inode *inode)
609 u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
610 __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
611 struct ocfs2_lock_res *lockres = &dl->dl_lockres;
613 ocfs2_lock_res_init_once(lockres);
616 * Unfortunately, the standard lock naming scheme won't work
617 * here because we have two 16 byte values to use. Instead,
618 * we'll stuff the inode number as a binary value. We still
619 * want error prints to show something without garbling the
620 * display, so drop a null byte in there before the inode
621 * number. A future version of OCFS2 will likely use all
622 * binary lock names. The stringified names have been a
623 * tremendous aid in debugging, but now that the debugfs
624 * interface exists, we can mangle things there if need be.
626 * NOTE: We also drop the standard "pad" value (the total lock
627 * name size stays the same though - the last part is all
628 * zeros due to the memset in ocfs2_lock_res_init_once()
630 len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
632 ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
635 BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
637 memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
640 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
641 OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
645 static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
646 struct ocfs2_super *osb)
648 /* Superblock lockres doesn't come from a slab so we call init
649 * once on it manually. */
650 ocfs2_lock_res_init_once(res);
651 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
653 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
654 &ocfs2_super_lops, osb);
657 static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
658 struct ocfs2_super *osb)
660 /* Rename lockres doesn't come from a slab so we call init
661 * once on it manually. */
662 ocfs2_lock_res_init_once(res);
663 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
664 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
665 &ocfs2_rename_lops, osb);
668 static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
669 struct ocfs2_super *osb)
671 /* nfs_sync lockres doesn't come from a slab so we call init
672 * once on it manually. */
673 ocfs2_lock_res_init_once(res);
674 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
675 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
676 &ocfs2_nfs_sync_lops, osb);
679 static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
680 struct ocfs2_super *osb)
682 ocfs2_lock_res_init_once(res);
683 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
684 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
685 &ocfs2_orphan_scan_lops, osb);
688 void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
689 struct ocfs2_file_private *fp)
691 struct inode *inode = fp->fp_file->f_mapping->host;
692 struct ocfs2_inode_info *oi = OCFS2_I(inode);
694 ocfs2_lock_res_init_once(lockres);
695 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
696 inode->i_generation, lockres->l_name);
697 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
698 OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
700 lockres->l_flags |= OCFS2_LOCK_NOCACHE;
703 void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
704 struct ocfs2_mem_dqinfo *info)
706 ocfs2_lock_res_init_once(lockres);
707 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
709 ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
710 OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
714 void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
715 struct ocfs2_super *osb, u64 ref_blkno,
716 unsigned int generation)
718 ocfs2_lock_res_init_once(lockres);
719 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
720 generation, lockres->l_name);
721 ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
722 &ocfs2_refcount_block_lops, osb);
725 void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
729 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
732 ocfs2_remove_lockres_tracking(res);
734 mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
735 "Lockres %s is on the blocked list\n",
737 mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
738 "Lockres %s has mask waiters pending\n",
740 mlog_bug_on_msg(spin_is_locked(&res->l_lock),
741 "Lockres %s is locked\n",
743 mlog_bug_on_msg(res->l_ro_holders,
744 "Lockres %s has %u ro holders\n",
745 res->l_name, res->l_ro_holders);
746 mlog_bug_on_msg(res->l_ex_holders,
747 "Lockres %s has %u ex holders\n",
748 res->l_name, res->l_ex_holders);
750 /* Need to clear out the lock status block for the dlm */
751 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
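
/* Holder counts record how many local users hold this lockres at
 * read-only (PR) or exclusive (EX) level. Callers manipulate them while
 * holding lockres->l_lock. */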
757 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
766 lockres->l_ex_holders++;
769 lockres->l_ro_holders++;
778 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
787 BUG_ON(!lockres->l_ex_holders);
788 lockres->l_ex_holders--;
791 BUG_ON(!lockres->l_ro_holders);
792 lockres->l_ro_holders--;
800 /* WARNING: This function lives in a world where the only three lock
801 * levels are EX, PR, and NL. It *will* have to be adjusted when more
802 * lock types are added. */
803 static inline int ocfs2_highest_compat_lock_level(int level)
805 int new_level = DLM_LOCK_EX;
807 if (level == DLM_LOCK_EX)
808 new_level = DLM_LOCK_NL;
809 else if (level == DLM_LOCK_PR)
810 new_level = DLM_LOCK_PR;
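
/*
 * Update l_flags and complete any mask waiters whose (mask, goal) pair
 * is now satisfied. The caller must already hold lockres->l_lock.
 */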
814 static void lockres_set_flags(struct ocfs2_lock_res *lockres,
815 unsigned long newflags)
817 struct ocfs2_mask_waiter *mw, *tmp;
819 assert_spin_locked(&lockres->l_lock);
821 lockres->l_flags = newflags;
823 list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
824 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
827 list_del_init(&mw->mw_item);
829 complete(&mw->mw_complete);
832 static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
834 lockres_set_flags(lockres, lockres->l_flags | or);
836 static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
839 lockres_set_flags(lockres, lockres->l_flags & ~clear);
842 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
846 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
847 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
848 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
849 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
851 lockres->l_level = lockres->l_requested;
852 if (lockres->l_level <=
853 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
854 lockres->l_blocking = DLM_LOCK_NL;
855 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
857 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
862 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
866 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
867 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
869 /* Convert from RO to EX doesn't really need anything as our
870 * information is already up to date. Convert from NL to
871 * *anything*, however, should mark ourselves as needing an update. */
873 if (lockres->l_level == DLM_LOCK_NL &&
874 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
875 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
877 lockres->l_level = lockres->l_requested;
878 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
883 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
887 BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
888 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
890 if (lockres->l_requested > DLM_LOCK_NL &&
891 !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
892 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
893 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
895 lockres->l_level = lockres->l_requested;
896 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
897 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
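
/*
 * Called under lockres->l_lock when a blocking AST arrives: record the
 * level the other node is waiting for and return nonzero if the caller
 * should queue this lockres for downconvert.
 */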
902 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
905 int needs_downconvert = 0;
908 assert_spin_locked(&lockres->l_lock);
910 if (level > lockres->l_blocking) {
911 /* only schedule a downconvert if we haven't already scheduled
912 * one that goes low enough to satisfy the level we're
913 * blocking. this also catches the case where we get
915 if (ocfs2_highest_compat_lock_level(level) <
916 ocfs2_highest_compat_lock_level(lockres->l_blocking))
917 needs_downconvert = 1;
919 lockres->l_blocking = level;
922 if (needs_downconvert)
923 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
925 mlog_exit(needs_downconvert);
926 return needs_downconvert;
930 * OCFS2_LOCK_PENDING and l_pending_gen.
932 * Why does OCFS2_LOCK_PENDING exist? To close a race between setting
933 * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock()
934 * for more details on the race.
936 * OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces
937 * a race on itself. In o2dlm, we can get the ast before ocfs2_dlm_lock()
938 * returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear
939 * OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns,
940 * the caller is going to try to clear PENDING again. If nothing else is
941 * happening, __lockres_clear_pending() sees PENDING is unset and does nothing.
944 * But what if another path (eg downconvert thread) has just started a
945 * new locking action? The other path has re-set PENDING. Our path
946 * cannot clear PENDING, because that will re-open the original race window.
952 * ocfs2_cluster_lock()
957 * ocfs2_locking_ast() ocfs2_downconvert_thread()
958 * clear PENDING ocfs2_unblock_lock()
961 * ocfs2_prepare_downconvert()
971 * So as you can see, we now have a window where l_lock is not held,
972 * PENDING is not set, and ocfs2_dlm_lock() has not been called.
974 * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
975 * set by ocfs2_prepare_downconvert(). That wasn't nice.
977 * To solve this we introduce l_pending_gen. A call to
978 * lockres_clear_pending() will only do so when it is passed a generation
979 * number that matches the lockres. lockres_set_pending() will return the
980 * current generation number. When ocfs2_cluster_lock() goes to clear
981 * PENDING, it passes the generation it got from set_pending(). In our
982 * example above, the generation numbers will *not* match. Thus,
983 * ocfs2_cluster_lock() will not clear the PENDING set by
984 * ocfs2_prepare_downconvert().
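 *
 * A typical caller therefore follows the pattern used in
 * ocfs2_lock_create() and __ocfs2_cluster_lock() below:
 *
 *	gen = lockres_set_pending(lockres);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *	ret = ocfs2_dlm_lock(...);
 *	lockres_clear_pending(lockres, gen, osb);
 */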
987 /* Unlocked version for ocfs2_locking_ast() */
988 static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
989 unsigned int generation,
990 struct ocfs2_super *osb)
992 assert_spin_locked(&lockres->l_lock);
995 * The ast and locking functions can race us here. The winner
996 * will clear pending, the loser will not.
998 if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
999 (lockres->l_pending_gen != generation))
1002 lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
1003 lockres->l_pending_gen++;
1006 * The downconvert thread may have skipped us because we
1007 * were PENDING. Wake it up.
1009 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1010 ocfs2_wake_downconvert_thread(osb);
1013 /* Locked version for callers of ocfs2_dlm_lock() */
1014 static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
1015 unsigned int generation,
1016 struct ocfs2_super *osb)
1018 unsigned long flags;
1020 spin_lock_irqsave(&lockres->l_lock, flags);
1021 __lockres_clear_pending(lockres, generation, osb);
1022 spin_unlock_irqrestore(&lockres->l_lock, flags);
1025 static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
1027 assert_spin_locked(&lockres->l_lock);
1028 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
1030 lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
1032 return lockres->l_pending_gen;
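
/*
 * Fired by the DLM when another node wants a level that conflicts with
 * what we currently hold. We note the blocked level and, if necessary,
 * hand the lockres over to the downconvert thread.
 */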
1036 static void ocfs2_blocking_ast(void *opaque, int level)
1038 struct ocfs2_lock_res *lockres = opaque;
1039 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1040 int needs_downconvert;
1041 unsigned long flags;
1043 BUG_ON(level <= DLM_LOCK_NL);
1045 mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
1046 lockres->l_name, level, lockres->l_level,
1047 ocfs2_lock_type_string(lockres->l_type));
1050 * We can skip the bast for locks which don't enable caching -
1051 * they'll be dropped at the earliest possible time anyway.
1053 if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
1056 spin_lock_irqsave(&lockres->l_lock, flags);
1057 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
1058 if (needs_downconvert)
1059 ocfs2_schedule_blocked_lock(osb, lockres);
1060 spin_unlock_irqrestore(&lockres->l_lock, flags);
1062 wake_up(&lockres->l_event);
1064 ocfs2_wake_downconvert_thread(osb);
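
/*
 * Completion AST for our own lock and convert requests. On success the
 * result is folded into local state based on l_action; -EAGAIN simply
 * clears OCFS2_LOCK_BUSY, and any other lksb error is logged.
 */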
1067 static void ocfs2_locking_ast(void *opaque)
1069 struct ocfs2_lock_res *lockres = opaque;
1070 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1071 unsigned long flags;
1074 spin_lock_irqsave(&lockres->l_lock, flags);
1076 status = ocfs2_dlm_lock_status(&lockres->l_lksb);
1078 if (status == -EAGAIN) {
1079 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1084 mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
1085 lockres->l_name, status);
1086 spin_unlock_irqrestore(&lockres->l_lock, flags);
1090 switch(lockres->l_action) {
1091 case OCFS2_AST_ATTACH:
1092 ocfs2_generic_handle_attach_action(lockres);
1093 lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
1095 case OCFS2_AST_CONVERT:
1096 ocfs2_generic_handle_convert_action(lockres);
1098 case OCFS2_AST_DOWNCONVERT:
1099 ocfs2_generic_handle_downconvert_action(lockres);
1102 mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
1103 "lockres flags = 0x%lx, unlock action: %u\n",
1104 lockres->l_name, lockres->l_action, lockres->l_flags,
1105 lockres->l_unlock_action);
1109 /* set it to something invalid so if we get called again we can catch it. */
1111 lockres->l_action = OCFS2_AST_INVALID;
1113 /* Did we try to cancel this lock? Clear that state */
1114 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
1115 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1118 * We may have beaten the locking functions here. We certainly
1119 * know that dlm_lock() has been called :-)
1120 * Because we can't have two lock calls in flight at once, we
1121 * can use lockres->l_pending_gen.
1123 __lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
1125 wake_up(&lockres->l_event);
1126 spin_unlock_irqrestore(&lockres->l_lock, flags);
1129 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1132 unsigned long flags;
1135 spin_lock_irqsave(&lockres->l_lock, flags);
1136 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1138 lockres->l_action = OCFS2_AST_INVALID;
1140 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1141 spin_unlock_irqrestore(&lockres->l_lock, flags);
1143 wake_up(&lockres->l_event);
1147 /* Note: If we detect another process working on the lock (i.e.,
1148 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
1149 * to do the right thing in that case.
1151 static int ocfs2_lock_create(struct ocfs2_super *osb,
1152 struct ocfs2_lock_res *lockres,
1157 unsigned long flags;
1162 mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
1165 spin_lock_irqsave(&lockres->l_lock, flags);
1166 if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
1167 (lockres->l_flags & OCFS2_LOCK_BUSY)) {
1168 spin_unlock_irqrestore(&lockres->l_lock, flags);
1172 lockres->l_action = OCFS2_AST_ATTACH;
1173 lockres->l_requested = level;
1174 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1175 gen = lockres_set_pending(lockres);
1176 spin_unlock_irqrestore(&lockres->l_lock, flags);
1178 ret = ocfs2_dlm_lock(osb->cconn,
1183 OCFS2_LOCK_ID_MAX_LEN - 1,
1185 lockres_clear_pending(lockres, gen, osb);
1187 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1188 ocfs2_recover_from_dlm_error(lockres, 1);
1191 mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
1198 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
1201 unsigned long flags;
1204 spin_lock_irqsave(&lockres->l_lock, flags);
1205 ret = lockres->l_flags & flag;
1206 spin_unlock_irqrestore(&lockres->l_lock, flags);
1211 static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
1214 wait_event(lockres->l_event,
1215 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
1218 static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
1221 wait_event(lockres->l_event,
1222 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
1225 /* predict what lock level we'll be dropping down to on behalf
1226 * of another node, and return true if the currently wanted
1227 * level will be compatible with it. */
1228 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
1231 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
1233 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
1236 static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
1238 INIT_LIST_HEAD(&mw->mw_item);
1239 init_completion(&mw->mw_complete);
1240 ocfs2_init_start_time(mw);
1243 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
1245 wait_for_completion(&mw->mw_complete);
1246 /* Re-arm the completion in case we want to wait on it again */
1247 INIT_COMPLETION(mw->mw_complete);
1248 return mw->mw_status;
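
/* Queue a mask waiter on the lockres; it is completed once the masked
 * l_flags hit mw_goal. The caller must hold lockres->l_lock. */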
1251 static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
1252 struct ocfs2_mask_waiter *mw,
1256 BUG_ON(!list_empty(&mw->mw_item));
1258 assert_spin_locked(&lockres->l_lock);
1260 list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
1265 /* returns 0 if the mw that was removed was already satisfied, -EBUSY
1266 * if the mask still hadn't reached its goal */
1267 static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
1268 struct ocfs2_mask_waiter *mw)
1270 unsigned long flags;
1273 spin_lock_irqsave(&lockres->l_lock, flags);
1274 if (!list_empty(&mw->mw_item)) {
1275 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
1278 list_del_init(&mw->mw_item);
1279 init_completion(&mw->mw_complete);
1281 spin_unlock_irqrestore(&lockres->l_lock, flags);
1287 static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
1288 struct ocfs2_lock_res *lockres)
1292 ret = wait_for_completion_interruptible(&mw->mw_complete);
1294 lockres_remove_mask_waiter(lockres, mw);
1296 ret = mw->mw_status;
1297 /* Re-arm the completion in case we want to wait on it again */
1298 INIT_COMPLETION(mw->mw_complete);
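
/*
 * Core cluster lock acquisition path: wait out BUSY and BLOCKED states,
 * issue a dlm lock or convert request whenever the granted level is too
 * low, and bump the holder count once the requested level is held.
 */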
1302 static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1303 struct ocfs2_lock_res *lockres,
1308 unsigned long caller_ip)
1310 struct ocfs2_mask_waiter mw;
1311 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
1312 int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
1313 unsigned long flags;
1315 int noqueue_attempted = 0;
1319 ocfs2_init_mask_waiter(&mw);
1321 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
1322 lkm_flags |= DLM_LKF_VALBLK;
1327 if (catch_signals && signal_pending(current)) {
1332 spin_lock_irqsave(&lockres->l_lock, flags);
1334 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
1335 "Cluster lock called on freeing lockres %s! flags "
1336 "0x%lx\n", lockres->l_name, lockres->l_flags);
1338 /* We only compare against the currently granted level
1339 * here. If the lock is blocked waiting on a downconvert,
1340 * we'll get caught below. */
1341 if (lockres->l_flags & OCFS2_LOCK_BUSY &&
1342 level > lockres->l_level) {
1343 /* is someone sitting in dlm_lock? If so, wait on them. */
1345 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1350 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
1351 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
1352 /* is the lock currently blocked on behalf of another node? */
1354 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
1359 if (level > lockres->l_level) {
1360 if (noqueue_attempted > 0) {
1364 if (lkm_flags & DLM_LKF_NOQUEUE)
1365 noqueue_attempted = 1;
1367 if (lockres->l_action != OCFS2_AST_INVALID)
1368 mlog(ML_ERROR, "lockres %s has action %u pending\n",
1369 lockres->l_name, lockres->l_action);
1371 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1372 lockres->l_action = OCFS2_AST_ATTACH;
1373 lkm_flags &= ~DLM_LKF_CONVERT;
1375 lockres->l_action = OCFS2_AST_CONVERT;
1376 lkm_flags |= DLM_LKF_CONVERT;
1379 lockres->l_requested = level;
1380 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1381 gen = lockres_set_pending(lockres);
1382 spin_unlock_irqrestore(&lockres->l_lock, flags);
1384 BUG_ON(level == DLM_LOCK_IV);
1385 BUG_ON(level == DLM_LOCK_NL);
1387 mlog(0, "lock %s, convert from %d to level = %d\n",
1388 lockres->l_name, lockres->l_level, level);
1390 /* call dlm_lock to upgrade lock now */
1391 ret = ocfs2_dlm_lock(osb->cconn,
1396 OCFS2_LOCK_ID_MAX_LEN - 1,
1398 lockres_clear_pending(lockres, gen, osb);
1400 if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
1402 ocfs2_log_dlm_error("ocfs2_dlm_lock",
1405 ocfs2_recover_from_dlm_error(lockres, 1);
1409 mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
1412 /* At this point we've gone inside the dlm and need to
1413 * complete our work regardless. */
1416 /* wait for busy to clear and carry on */
1420 /* Ok, if we get here then we're good to go. */
1421 ocfs2_inc_holders(lockres, level);
1425 spin_unlock_irqrestore(&lockres->l_lock, flags);
1428 * This is helping work around a lock inversion between the page lock
1429 * and dlm locks. One path holds the page lock while calling aops
1430 * which block acquiring dlm locks. The downconvert thread holds dlm
1431 * locks while acquiring page locks while down converting data locks.
1432 * This block is helping an aop path notice the inversion and back
1433 * off to unlock its page lock before trying the dlm lock again.
1435 if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1436 mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1438 if (lockres_remove_mask_waiter(lockres, &mw))
1444 ret = ocfs2_wait_for_mask(&mw);
1449 ocfs2_update_lock_stats(lockres, level, &mw, ret);
1451 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1452 if (!ret && lockres->l_lockdep_map.key != NULL) {
1453 if (level == DLM_LOCK_PR)
1454 rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
1455 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1458 rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
1459 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1467 static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
1468 struct ocfs2_lock_res *lockres,
1473 return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
1478 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1479 struct ocfs2_lock_res *lockres,
1481 unsigned long caller_ip)
1483 unsigned long flags;
1486 spin_lock_irqsave(&lockres->l_lock, flags);
1487 ocfs2_dec_holders(lockres, level);
1488 ocfs2_downconvert_on_unlock(osb, lockres);
1489 spin_unlock_irqrestore(&lockres->l_lock, flags);
1490 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1491 if (lockres->l_lockdep_map.key != NULL)
1492 rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
1497 static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1498 struct ocfs2_lock_res *lockres,
1502 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1503 unsigned long flags;
1504 u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
1506 spin_lock_irqsave(&lockres->l_lock, flags);
1507 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1508 lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1509 spin_unlock_irqrestore(&lockres->l_lock, flags);
1511 return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1514 /* Grants us an EX lock on the data and metadata resources, skipping
1515 * the normal cluster directory lookup. Use this ONLY on newly created
1516 * inodes which other nodes can't possibly see, and which haven't been
1517 * hashed in the inode hash yet. This can give us a good performance
1518 * increase as it'll skip the network broadcast normally associated
1519 * with creating a new lock resource. */
1520 int ocfs2_create_new_inode_locks(struct inode *inode)
1523 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1526 BUG_ON(!ocfs2_inode_is_new(inode));
1530 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1532 /* NOTE: We don't increment any of the holder counts, nor
1533 * do we add anything to a journal handle. Since this is
1534 * supposed to be a new inode which the cluster doesn't know
1535 * about yet, there is no need to. As far as the LVB handling
1536 * is concerned, this is basically like acquiring an EX lock
1537 * on a resource which has an invalid one -- we'll set it
1538 * valid when we release the EX. */
1540 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1547 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
1548 * don't use a generation in their lock names.
1550 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
1556 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1567 int ocfs2_rw_lock(struct inode *inode, int write)
1570 struct ocfs2_lock_res *lockres;
1571 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1577 mlog(0, "inode %llu take %s RW lock\n",
1578 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1579 write ? "EXMODE" : "PRMODE");
1581 if (ocfs2_mount_local(osb)) {
1586 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1588 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1590 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1599 void ocfs2_rw_unlock(struct inode *inode, int write)
1601 int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1602 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1603 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1607 mlog(0, "inode %llu drop %s RW lock\n",
1608 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1609 write ? "EXMODE" : "PRMODE");
1611 if (!ocfs2_mount_local(osb))
1612 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1618 * ocfs2_open_lock always gets a PR mode lock.
1620 int ocfs2_open_lock(struct inode *inode)
1623 struct ocfs2_lock_res *lockres;
1624 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1630 mlog(0, "inode %llu take PRMODE open lock\n",
1631 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1633 if (ocfs2_mount_local(osb))
1636 lockres = &OCFS2_I(inode)->ip_open_lockres;
1638 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1648 int ocfs2_try_open_lock(struct inode *inode, int write)
1650 int status = 0, level;
1651 struct ocfs2_lock_res *lockres;
1652 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1658 mlog(0, "inode %llu try to take %s open lock\n",
1659 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1660 write ? "EXMODE" : "PRMODE");
1662 if (ocfs2_mount_local(osb))
1665 lockres = &OCFS2_I(inode)->ip_open_lockres;
1667 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1670 * The file system may already be holding a PRMODE/EXMODE open lock.
1671 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
1672 * other nodes and the -EAGAIN will indicate to the caller that
1673 * this inode is still in use.
1675 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1676 level, DLM_LKF_NOQUEUE, 0);
1684 * ocfs2_open_unlock unlocks PR and EX mode open locks.
1686 void ocfs2_open_unlock(struct inode *inode)
1688 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1689 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1693 mlog(0, "inode %llu drop open lock\n",
1694 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1696 if (ocfs2_mount_local(osb))
1699 if (lockres->l_ro_holders)
1700 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1702 if (lockres->l_ex_holders)
1703 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1710 static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
1714 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1715 unsigned long flags;
1716 struct ocfs2_mask_waiter mw;
1718 ocfs2_init_mask_waiter(&mw);
1721 spin_lock_irqsave(&lockres->l_lock, flags);
1722 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
1723 ret = ocfs2_prepare_cancel_convert(osb, lockres);
1725 spin_unlock_irqrestore(&lockres->l_lock, flags);
1726 ret = ocfs2_cancel_convert(osb, lockres);
1733 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1734 spin_unlock_irqrestore(&lockres->l_lock, flags);
1736 ocfs2_wait_for_mask(&mw);
1742 * We may still have gotten the lock, in which case there's no
1743 * point to restarting the syscall.
1745 if (lockres->l_level == level)
1748 mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
1749 lockres->l_flags, lockres->l_level, lockres->l_action);
1751 spin_unlock_irqrestore(&lockres->l_lock, flags);
1758 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
1759 * flock() calls. The locking approach this requires is sufficiently
1760 * different from all other cluster lock types that we implement a
1761 * separate path to the "low-level" dlm calls. In particular:
1763 * - No optimization of lock levels is done - we take exactly
1764 * what's been requested.
1766 * - No lock caching is employed. We immediately downconvert to
1767 * no-lock at unlock time (this also means flock locks never go on
1768 * the blocking list).
1770 * - Since userspace can trivially deadlock itself with flock, we make
1771 * sure to allow cancellation of a misbehaving application's flock() request.
1774 * - Access to any flock lockres doesn't require concurrency, so we
1775 * can simplify the code by requiring the caller to guarantee
1776 * serialization of dlmglue flock calls.
1778 int ocfs2_file_lock(struct file *file, int ex, int trylock)
1780 int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1781 unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
1782 unsigned long flags;
1783 struct ocfs2_file_private *fp = file->private_data;
1784 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1785 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1786 struct ocfs2_mask_waiter mw;
1788 ocfs2_init_mask_waiter(&mw);
1790 if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
1791 (lockres->l_level > DLM_LOCK_NL)) {
1793 "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
1794 "level: %u\n", lockres->l_name, lockres->l_flags,
1799 spin_lock_irqsave(&lockres->l_lock, flags);
1800 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1801 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1802 spin_unlock_irqrestore(&lockres->l_lock, flags);
1805 * Get the lock at NLMODE to start - that way we
1806 * can cancel the upconvert request if need be.
1808 ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
1814 ret = ocfs2_wait_for_mask(&mw);
1819 spin_lock_irqsave(&lockres->l_lock, flags);
1822 lockres->l_action = OCFS2_AST_CONVERT;
1823 lkm_flags |= DLM_LKF_CONVERT;
1824 lockres->l_requested = level;
1825 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1827 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1828 spin_unlock_irqrestore(&lockres->l_lock, flags);
1830 ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
1831 lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
1834 if (!trylock || (ret != -EAGAIN)) {
1835 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1839 ocfs2_recover_from_dlm_error(lockres, 1);
1840 lockres_remove_mask_waiter(lockres, &mw);
1844 ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
1845 if (ret == -ERESTARTSYS) {
1847 * Userspace can cause deadlock itself with
1848 * flock(). Current behavior locally is to allow the
1849 * deadlock, but abort the system call if a signal is
1850 * received. We follow this example, otherwise a
1851 * poorly written program could sit in kernel until reboot.
1854 * Handling this is a bit more complicated for Ocfs2
1855 * though. We can't exit this function with an
1856 * outstanding lock request, so a cancel convert is
1857 * required. We intentionally overwrite 'ret' - if the
1858 * cancel fails and the lock was granted, it's easier
1859 * to just bubble success back up to the user.
1861 ret = ocfs2_flock_handle_signal(lockres, level);
1862 } else if (!ret && (level > lockres->l_level)) {
1863 /* Trylock failed asynchronously */
1870 mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
1871 lockres->l_name, ex, trylock, ret);
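
/*
 * Drop an flock() lock by faking a blocking AST and downconverting
 * straight back to NLMODE, then waiting for the convert to finish.
 */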
1875 void ocfs2_file_unlock(struct file *file)
1879 unsigned long flags;
1880 struct ocfs2_file_private *fp = file->private_data;
1881 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1882 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1883 struct ocfs2_mask_waiter mw;
1885 ocfs2_init_mask_waiter(&mw);
1887 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
1890 if (lockres->l_level == DLM_LOCK_NL)
1893 mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
1894 lockres->l_name, lockres->l_flags, lockres->l_level,
1897 spin_lock_irqsave(&lockres->l_lock, flags);
1899 * Fake a blocking ast for the downconvert code.
1901 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
1902 lockres->l_blocking = DLM_LOCK_EX;
1904 gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
1905 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1906 spin_unlock_irqrestore(&lockres->l_lock, flags);
1908 ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
1914 ret = ocfs2_wait_for_mask(&mw);
1919 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
1920 struct ocfs2_lock_res *lockres)
1926 /* If we know that another node is waiting on our lock, kick
1927 * the downconvert thread pre-emptively when we reach a release condition. */
1929 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
1930 switch(lockres->l_blocking) {
1932 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
1936 if (!lockres->l_ex_holders)
1945 ocfs2_wake_downconvert_thread(osb);
1950 #define OCFS2_SEC_BITS 34
1951 #define OCFS2_SEC_SHIFT (64 - 34)
1952 #define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
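
/*
 * Packed layout: the upper OCFS2_SEC_BITS (34) bits hold the seconds and
 * the low OCFS2_SEC_SHIFT (30) bits hold the nanoseconds, i.e.
 *
 *	packed = (sec << 30) | (nsec & ((1ULL << 30) - 1));
 *	sec    = packed >> 30;
 *	nsec   = packed & OCFS2_NSEC_MASK;
 */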
1954 /* LVB only has room for 64 bits of time here, so we pack the seconds
 * and nanoseconds into a single value. */
1956 static u64 ocfs2_pack_timespec(struct timespec *spec)
1959 u64 sec = spec->tv_sec;
1960 u32 nsec = spec->tv_nsec;
1962 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
1967 /* Call this with the lockres locked. I am reasonably sure we don't
1968 * need ip_lock in this function as anyone who would be changing those
1969 * values is supposed to be blocked in ocfs2_inode_lock right now. */
1970 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
1972 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1973 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
1974 struct ocfs2_meta_lvb *lvb;
1978 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
1981 * Invalidate the LVB of a deleted inode - this way other
1982 * nodes are forced to go to disk and discover the new inode
1985 if (oi->ip_flags & OCFS2_INODE_DELETED) {
1986 lvb->lvb_version = 0;
1990 lvb->lvb_version = OCFS2_LVB_VERSION;
1991 lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
1992 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
1993 lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
1994 lvb->lvb_igid = cpu_to_be32(inode->i_gid);
1995 lvb->lvb_imode = cpu_to_be16(inode->i_mode);
1996 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
1997 lvb->lvb_iatime_packed =
1998 cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
1999 lvb->lvb_ictime_packed =
2000 cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
2001 lvb->lvb_imtime_packed =
2002 cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
2003 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
2004 lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
2005 lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
2008 mlog_meta_lvb(0, lockres);
2013 static void ocfs2_unpack_timespec(struct timespec *spec,
2016 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
2017 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
2020 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
2022 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2023 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2024 struct ocfs2_meta_lvb *lvb;
2028 mlog_meta_lvb(0, lockres);
2030 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2032 /* We're safe here without the lockres lock... */
2033 spin_lock(&oi->ip_lock);
2034 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
2035 i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
2037 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
2038 oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
2039 ocfs2_set_inode_flags(inode);
2041 /* fast-symlinks are a special case */
2042 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
2043 inode->i_blocks = 0;
2045 inode->i_blocks = ocfs2_inode_sector_count(inode);
2047 inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
2048 inode->i_gid = be32_to_cpu(lvb->lvb_igid);
2049 inode->i_mode = be16_to_cpu(lvb->lvb_imode);
2050 inode->i_nlink = be16_to_cpu(lvb->lvb_inlink);
2051 ocfs2_unpack_timespec(&inode->i_atime,
2052 be64_to_cpu(lvb->lvb_iatime_packed));
2053 ocfs2_unpack_timespec(&inode->i_mtime,
2054 be64_to_cpu(lvb->lvb_imtime_packed));
2055 ocfs2_unpack_timespec(&inode->i_ctime,
2056 be64_to_cpu(lvb->lvb_ictime_packed));
2057 spin_unlock(&oi->ip_lock);
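
/*
 * An LVB is only trusted when the DLM reports it valid and both the LVB
 * version and the inode generation match what we expect; otherwise the
 * caller falls back to reading the inode from disk.
 */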
2062 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
2063 struct ocfs2_lock_res *lockres)
2065 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2067 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
2068 && lvb->lvb_version == OCFS2_LVB_VERSION
2069 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
2074 /* Determine whether a lock resource needs to be refreshed, and
2075 * arbitrate who gets to refresh it.
2077 * 0 means no refresh needed.
2079 * > 0 means you need to refresh this and you MUST call
2080 * ocfs2_complete_lock_res_refresh afterwards. */
2081 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
2083 unsigned long flags;
2089 spin_lock_irqsave(&lockres->l_lock, flags);
2090 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
2091 spin_unlock_irqrestore(&lockres->l_lock, flags);
2095 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
2096 spin_unlock_irqrestore(&lockres->l_lock, flags);
2098 ocfs2_wait_on_refreshing_lock(lockres);
2102 /* Ok, I'll be the one to refresh this lock. */
2103 lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
2104 spin_unlock_irqrestore(&lockres->l_lock, flags);
2112 /* If status is nonzero, I'll mark it as not being in refresh
2113 * anymore, but I won't clear the needs refresh flag. */
2114 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
2117 unsigned long flags;
2120 spin_lock_irqsave(&lockres->l_lock, flags);
2121 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
2123 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
2124 spin_unlock_irqrestore(&lockres->l_lock, flags);
2126 wake_up(&lockres->l_event);
2131 /* may or may not return a bh if it went to disk. */
2132 static int ocfs2_inode_lock_update(struct inode *inode,
2133 struct buffer_head **bh)
2136 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2137 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2138 struct ocfs2_dinode *fe;
2139 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2143 if (ocfs2_mount_local(osb))
2146 spin_lock(&oi->ip_lock);
2147 if (oi->ip_flags & OCFS2_INODE_DELETED) {
2148 mlog(0, "Orphaned inode %llu was deleted while we "
2149 "were waiting on a lock. ip_flags = 0x%x\n",
2150 (unsigned long long)oi->ip_blkno, oi->ip_flags);
2151 spin_unlock(&oi->ip_lock);
2155 spin_unlock(&oi->ip_lock);
2157 if (!ocfs2_should_refresh_lock_res(lockres))
2160 /* This will discard any caching information we might have had
2161 * for the inode metadata. */
2162 ocfs2_metadata_cache_purge(INODE_CACHE(inode));
2164 ocfs2_extent_map_trunc(inode, 0);
2166 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
2167 mlog(0, "Trusting LVB on inode %llu\n",
2168 (unsigned long long)oi->ip_blkno);
2169 ocfs2_refresh_inode_from_lvb(inode);
2171 /* Boo, we have to go to disk. */
2172 /* read bh, cast, ocfs2_refresh_inode */
2173 status = ocfs2_read_inode_block(inode, bh);
2178 fe = (struct ocfs2_dinode *) (*bh)->b_data;
2180 /* This is a good chance to make sure we're not
2181 * locking an invalid object. ocfs2_read_inode_block()
2182 * already checked that the inode block is sane.
2184 * We bug on a stale inode here because we checked
2185 * above whether it was wiped from disk. The wiping
2186 * node provides a guarantee that we receive that
2187 * message and can mark the inode before dropping any
2188 * locks associated with it. */
2189 mlog_bug_on_msg(inode->i_generation !=
2190 le32_to_cpu(fe->i_generation),
2191 "Invalid dinode %llu disk generation: %u "
2192 "inode->i_generation: %u\n",
2193 (unsigned long long)oi->ip_blkno,
2194 le32_to_cpu(fe->i_generation),
2195 inode->i_generation);
2196 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
2197 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
2198 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
2199 (unsigned long long)oi->ip_blkno,
2200 (unsigned long long)le64_to_cpu(fe->i_dtime),
2201 le32_to_cpu(fe->i_flags));
2203 ocfs2_refresh_inode(inode, fe);
2204 ocfs2_track_lock_refresh(lockres);
2209 ocfs2_complete_lock_res_refresh(lockres, status);
2215 static int ocfs2_assign_bh(struct inode *inode,
2216 struct buffer_head **ret_bh,
2217 struct buffer_head *passed_bh)
2222 /* Ok, the update went to disk for us, use the
2224 *ret_bh = passed_bh;
2230 status = ocfs2_read_inode_block(inode, ret_bh);
2238 * returns < 0 error if the callback will never be called, otherwise
2239 * the result of the lock will be communicated via the callback.
2241 int ocfs2_inode_lock_full_nested(struct inode *inode,
2242 struct buffer_head **ret_bh,
2247 int status, level, acquired;
2249 struct ocfs2_lock_res *lockres = NULL;
2250 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2251 struct buffer_head *local_bh = NULL;
2257 mlog(0, "inode %llu, take %s META lock\n",
2258 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2259 ex ? "EXMODE" : "PRMODE");
2263 /* We'll allow faking a readonly metadata lock for
2265 if (ocfs2_is_hard_readonly(osb)) {
2271 if (ocfs2_mount_local(osb))
2274 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2275 ocfs2_wait_for_recovery(osb);
2277 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2278 level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2280 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
2281 dlm_flags |= DLM_LKF_NOQUEUE;
2283 status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
2284 arg_flags, subclass, _RET_IP_);
2286 if (status != -EAGAIN && status != -EIOCBRETRY)
2291 /* Notify the error cleanup path to drop the cluster lock. */
2294 /* We wait twice because a node may have died while we were in
2295 * the lower dlm layers. The second time though, we've
2296 * committed to owning this lock so we don't allow signals to
2297 * abort the operation. */
2298 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2299 ocfs2_wait_for_recovery(osb);
2303 * We only see this flag if we're being called from
2304 * ocfs2_read_locked_inode(). It means we're locking an inode
2305 * which hasn't been populated yet, so clear the refresh flag
2306 * and let the caller handle it.
2308 if (inode->i_state & I_NEW) {
2311 ocfs2_complete_lock_res_refresh(lockres, 0);
2315 /* This is fun. The caller may want a bh back, or it may
2316 * not. ocfs2_inode_lock_update definitely wants one in, but
2317 * may or may not read one, depending on what's in the
2318 * LVB. The result of all of this is that we've *only* gone to
2319 * disk if we have to, so the complexity is worthwhile. */
2320 status = ocfs2_inode_lock_update(inode, &local_bh);
2322 if (status != -ENOENT)
2328 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2337 if (ret_bh && (*ret_bh)) {
2342 ocfs2_inode_unlock(inode, ex);
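
/*
 * A minimal sketch of how the metadata lock above is typically consumed:
 * take the cluster lock (optionally asking for the validated inode block
 * back), use the metadata, then drop the lock and the buffer_head.  The
 * function name is illustrative only; the calls match the usage shown
 * elsewhere in this file.
 */
#if 0	/* illustration only */
static int example_read_inode_under_lock(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	int status;

	status = ocfs2_inode_lock(inode, &bh, 0);	/* PR is enough to read */
	if (status < 0)
		return status;

	/* ... inspect the struct ocfs2_dinode at bh->b_data ... */

	ocfs2_inode_unlock(inode, 0);
	brelse(bh);
	return 0;
}
#endif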
2353 * This is working around a lock inversion between tasks acquiring DLM
2354 * locks while holding a page lock and the downconvert thread which
2355 * blocks dlm lock acquisition while acquiring page locks.
2357 * ** These _with_page variants are only intended to be called from aop
2358 * methods that hold page locks and return a very specific *positive* error
2359 * code that aop methods pass up to the VFS -- test for errors with != 0. **
2361 * The DLM is called such that it returns -EAGAIN if it would have
2362 * blocked waiting for the downconvert thread. In that case we unlock
2363 * our page so the downconvert thread can make progress. Once we've
2364 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
2365 * that called us can bubble that back up into the VFS, which will then
2366 * immediately retry the aop call.
2368 * We do a blocking lock and immediate unlock before returning, though, so that
2369 * the lock has a great chance of being cached on this node by the time the VFS
2370 * calls back to retry the aop. This has a potential to livelock as nodes
2371 * ping locks back and forth, but that's a risk we're willing to take
2372 * in order to keep the workaround for the lock inversion simple.
2374 int ocfs2_inode_lock_with_page(struct inode *inode,
2375 struct buffer_head **ret_bh,
2381 ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
2382 if (ret == -EAGAIN) {
2384 if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
2385 ocfs2_inode_unlock(inode, ex);
2386 ret = AOP_TRUNCATED_PAGE;
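
/*
 * A minimal sketch of how an aop method is expected to consume the helper
 * above.  The trailing parameters (lock level and the locked page) are
 * assumed to match the elided part of the prototype, and the page-filling
 * step is only a placeholder.
 */
#if 0	/* illustration only */
static int example_aop_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		/*
		 * AOP_TRUNCATED_PAGE is positive and means the helper has
		 * already unlocked the page for the downconvert thread;
		 * pass it straight up so the VFS retries the call.  Real
		 * errors leave the page locked, so drop that lock here.
		 */
		if (ret != AOP_TRUNCATED_PAGE)
			unlock_page(page);
		return ret;
	}

	/* ... fill and mark the page up to date under the PR lock ... */

	ocfs2_inode_unlock(inode, 0);
	unlock_page(page);
	return 0;
}
#endif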
2392 int ocfs2_inode_lock_atime(struct inode *inode,
2393 struct vfsmount *vfsmnt,
2399 ret = ocfs2_inode_lock(inode, NULL, 0);
2406 * If we should update atime, we will get EX lock,
2407 * otherwise we just get PR lock.
2409 if (ocfs2_should_update_atime(inode, vfsmnt)) {
2410 struct buffer_head *bh = NULL;
2412 ocfs2_inode_unlock(inode, 0);
2413 ret = ocfs2_inode_lock(inode, &bh, 1);
2419 if (ocfs2_should_update_atime(inode, vfsmnt))
2420 ocfs2_update_inode_atime(inode, bh);
2430 void ocfs2_inode_unlock(struct inode *inode,
2433 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2434 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
2435 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2439 mlog(0, "inode %llu drop %s META lock\n",
2440 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2441 ex ? "EXMODE" : "PRMODE");
2443 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
2444 !ocfs2_mount_local(osb))
2445 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
2450 int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
2452 struct ocfs2_lock_res *lockres;
2453 struct ocfs2_orphan_scan_lvb *lvb;
2456 if (ocfs2_is_hard_readonly(osb))
2459 if (ocfs2_mount_local(osb))
2462 lockres = &osb->osb_orphan_scan.os_lockres;
2463 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2467 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2468 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
2469 lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
2470 *seqno = be32_to_cpu(lvb->lvb_os_seqno);
2472 *seqno = osb->osb_orphan_scan.os_seqno + 1;
2477 void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
2479 struct ocfs2_lock_res *lockres;
2480 struct ocfs2_orphan_scan_lvb *lvb;
2482 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
2483 lockres = &osb->osb_orphan_scan.os_lockres;
2484 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2485 lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
2486 lvb->lvb_os_seqno = cpu_to_be32(seqno);
2487 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
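
/*
 * A minimal sketch of the lock/unlock pairing for the orphan scan
 * sequence number: the EX lock returns the cluster-wide sequence number
 * (taken from the LVB when valid), and the unlock path publishes whatever
 * number the caller passes back.  Bumping it tells other nodes a pass has
 * completed.  The real caller's skip-if-already-scanned logic is omitted
 * here.
 */
#if 0	/* illustration only */
static void example_orphan_scan_pass(struct ocfs2_super *osb)
{
	u32 seqno = 0;

	if (ocfs2_orphan_scan_lock(osb, &seqno) < 0)
		return;

	/* ... scan the orphan directories under the EX lock ... */

	/* Publish the next sequence number to the cluster via the LVB. */
	ocfs2_orphan_scan_unlock(osb, seqno + 1);
}
#endif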
2491 int ocfs2_super_lock(struct ocfs2_super *osb,
2495 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2496 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2500 if (ocfs2_is_hard_readonly(osb))
2503 if (ocfs2_mount_local(osb))
2506 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
2512 /* The super block lock path is really in the best position to
2513 * know when resources covered by the lock need to be
2514 * refreshed, so we do it here. Of course, making sense of
2515 * everything is up to the caller :) */
2516 status = ocfs2_should_refresh_lock_res(lockres);
2522 status = ocfs2_refresh_slot_info(osb);
2524 ocfs2_complete_lock_res_refresh(lockres, status);
2528 ocfs2_track_lock_refresh(lockres);
2535 void ocfs2_super_unlock(struct ocfs2_super *osb,
2538 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2539 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2541 if (!ocfs2_mount_local(osb))
2542 ocfs2_cluster_unlock(osb, lockres, level);
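
/*
 * A minimal sketch of a superblock lock caller, assuming the elided
 * parameter in both prototypes above is the usual "ex" flag.  Per the
 * comment inside ocfs2_super_lock(), taking the lock transparently
 * refreshes the slot information when needed.
 */
#if 0	/* illustration only */
static int example_with_super_lock(struct ocfs2_super *osb)
{
	int status;

	status = ocfs2_super_lock(osb, 1);	/* 1 = exclusive */
	if (status < 0)
		return status;

	/* ... work that relies on current slot information ... */

	ocfs2_super_unlock(osb, 1);
	return 0;
}
#endif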
2545 int ocfs2_rename_lock(struct ocfs2_super *osb)
2548 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2550 if (ocfs2_is_hard_readonly(osb))
2553 if (ocfs2_mount_local(osb))
2556 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2563 void ocfs2_rename_unlock(struct ocfs2_super *osb)
2565 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2567 if (!ocfs2_mount_local(osb))
2568 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2571 int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
2574 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2576 if (ocfs2_is_hard_readonly(osb))
2579 if (ocfs2_mount_local(osb))
2582 status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
2585 mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
2590 void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
2592 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2594 if (!ocfs2_mount_local(osb))
2595 ocfs2_cluster_unlock(osb, lockres,
2596 ex ? LKM_EXMODE : LKM_PRMODE);
2599 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2602 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2603 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2604 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2608 if (ocfs2_is_hard_readonly(osb))
2611 if (ocfs2_mount_local(osb))
2614 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2621 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2623 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2624 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2625 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2627 if (!ocfs2_mount_local(osb))
2628 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2631 /* Reference counting of the dlm debug structure. We want this because
2632 * open references on the debug inodes can live on after an unmount, so
2633 * we can't rely on the ocfs2_super to always exist. */
2634 static void ocfs2_dlm_debug_free(struct kref *kref)
2636 struct ocfs2_dlm_debug *dlm_debug;
2638 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
2643 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
2646 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
2649 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
2651 kref_get(&debug->d_refcnt);
2654 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
2656 struct ocfs2_dlm_debug *dlm_debug;
2658 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
2660 mlog_errno(-ENOMEM);
2664 kref_init(&dlm_debug->d_refcnt);
2665 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
2666 dlm_debug->d_locking_state = NULL;
2671 /* Access to this is arbitrated for us via seq_file->sem. */
2672 struct ocfs2_dlm_seq_priv {
2673 struct ocfs2_dlm_debug *p_dlm_debug;
2674 struct ocfs2_lock_res p_iter_res;
2675 struct ocfs2_lock_res p_tmp_res;
2678 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
2679 struct ocfs2_dlm_seq_priv *priv)
2681 struct ocfs2_lock_res *iter, *ret = NULL;
2682 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
2684 assert_spin_locked(&ocfs2_dlm_tracking_lock);
2686 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
2687 /* discover the head of the list */
2688 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
2689 mlog(0, "End of list found, %p\n", ret);
2693 /* We track our "dummy" iteration lockres' by a NULL
2694 * l_ops field. */
2695 if (iter->l_ops != NULL) {
2704 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2706 struct ocfs2_dlm_seq_priv *priv = m->private;
2707 struct ocfs2_lock_res *iter;
2709 spin_lock(&ocfs2_dlm_tracking_lock);
2710 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2712 /* Since lockres' have the lifetime of their container
2713 * (which can be inodes, ocfs2_supers, etc) we want to
2714 * copy this out to a temporary lockres while still
2715 * under the spinlock. Obviously after this we can't
2716 * trust any pointers on the copy returned, but that's
2717 * ok as the information we want isn't typically held
2719 priv->p_tmp_res = *iter;
2720 iter = &priv->p_tmp_res;
2722 spin_unlock(&ocfs2_dlm_tracking_lock);
2727 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2731 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2733 struct ocfs2_dlm_seq_priv *priv = m->private;
2734 struct ocfs2_lock_res *iter = v;
2735 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
2737 spin_lock(&ocfs2_dlm_tracking_lock);
2738 iter = ocfs2_dlm_next_res(iter, priv);
2739 list_del_init(&dummy->l_debug_list);
2741 list_add(&dummy->l_debug_list, &iter->l_debug_list);
2742 priv->p_tmp_res = *iter;
2743 iter = &priv->p_tmp_res;
2745 spin_unlock(&ocfs2_dlm_tracking_lock);
2750 /* So that debugfs.ocfs2 can determine which format is being used */
2751 #define OCFS2_DLM_DEBUG_STR_VERSION 2
2752 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2756 struct ocfs2_lock_res *lockres = v;
2761 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
2763 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
2764 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
2766 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2768 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
2770 seq_printf(m, "%d\t"
2781 lockres->l_unlock_action,
2782 lockres->l_ro_holders,
2783 lockres->l_ex_holders,
2784 lockres->l_requested,
2785 lockres->l_blocking);
2787 /* Dump the raw LVB */
2788 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2789 for(i = 0; i < DLM_LVB_LEN; i++)
2790 seq_printf(m, "0x%x\t", lvb[i]);
2792 #ifdef CONFIG_OCFS2_FS_STATS
2793 # define lock_num_prmode(_l) (_l)->l_lock_num_prmode
2794 # define lock_num_exmode(_l) (_l)->l_lock_num_exmode
2795 # define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed
2796 # define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed
2797 # define lock_total_prmode(_l) (_l)->l_lock_total_prmode
2798 # define lock_total_exmode(_l) (_l)->l_lock_total_exmode
2799 # define lock_max_prmode(_l) (_l)->l_lock_max_prmode
2800 # define lock_max_exmode(_l) (_l)->l_lock_max_exmode
2801 # define lock_refresh(_l) (_l)->l_lock_refresh
2803 # define lock_num_prmode(_l) (0ULL)
2804 # define lock_num_exmode(_l) (0ULL)
2805 # define lock_num_prmode_failed(_l) (0)
2806 # define lock_num_exmode_failed(_l) (0)
2807 # define lock_total_prmode(_l) (0ULL)
2808 # define lock_total_exmode(_l) (0ULL)
2809 # define lock_max_prmode(_l) (0)
2810 # define lock_max_exmode(_l) (0)
2811 # define lock_refresh(_l) (0)
2813 /* The following seq_printf was added in version 2 of this output */
2814 seq_printf(m, "%llu\t"
2823 lock_num_prmode(lockres),
2824 lock_num_exmode(lockres),
2825 lock_num_prmode_failed(lockres),
2826 lock_num_exmode_failed(lockres),
2827 lock_total_prmode(lockres),
2828 lock_total_exmode(lockres),
2829 lock_max_prmode(lockres),
2830 lock_max_exmode(lockres),
2831 lock_refresh(lockres));
2834 seq_printf(m, "\n");
2838 static const struct seq_operations ocfs2_dlm_seq_ops = {
2839 .start = ocfs2_dlm_seq_start,
2840 .stop = ocfs2_dlm_seq_stop,
2841 .next = ocfs2_dlm_seq_next,
2842 .show = ocfs2_dlm_seq_show,
2845 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2847 struct seq_file *seq = (struct seq_file *) file->private_data;
2848 struct ocfs2_dlm_seq_priv *priv = seq->private;
2849 struct ocfs2_lock_res *res = &priv->p_iter_res;
2851 ocfs2_remove_lockres_tracking(res);
2852 ocfs2_put_dlm_debug(priv->p_dlm_debug);
2853 return seq_release_private(inode, file);
2856 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2859 struct ocfs2_dlm_seq_priv *priv;
2860 struct seq_file *seq;
2861 struct ocfs2_super *osb;
2863 priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
2869 osb = inode->i_private;
2870 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2871 priv->p_dlm_debug = osb->osb_dlm_debug;
2872 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2874 ret = seq_open(file, &ocfs2_dlm_seq_ops);
2881 seq = (struct seq_file *) file->private_data;
2882 seq->private = priv;
2884 ocfs2_add_lockres_tracking(&priv->p_iter_res,
2891 static const struct file_operations ocfs2_dlm_debug_fops = {
2892 .open = ocfs2_dlm_debug_open,
2893 .release = ocfs2_dlm_debug_release,
2895 .llseek = seq_lseek,
2898 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2901 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2903 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
2905 osb->osb_debug_root,
2907 &ocfs2_dlm_debug_fops);
2908 if (!dlm_debug->d_locking_state) {
2911 "Unable to create locking state debugfs file.\n");
2915 ocfs2_get_dlm_debug(dlm_debug);
2920 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2922 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2925 debugfs_remove(dlm_debug->d_locking_state);
2926 ocfs2_put_dlm_debug(dlm_debug);
2930 int ocfs2_dlm_init(struct ocfs2_super *osb)
2933 struct ocfs2_cluster_connection *conn = NULL;
2937 if (ocfs2_mount_local(osb)) {
2942 status = ocfs2_dlm_init_debug(osb);
2948 /* launch downconvert thread */
2949 osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
2950 if (IS_ERR(osb->dc_task)) {
2951 status = PTR_ERR(osb->dc_task);
2952 osb->dc_task = NULL;
2957 /* for now, uuid == domain */
2958 status = ocfs2_cluster_connect(osb->osb_cluster_stack,
2960 strlen(osb->uuid_str),
2961 ocfs2_do_node_down, osb,
2968 status = ocfs2_cluster_this_node(&osb->node_num);
2972 "could not find this host's node number\n");
2973 ocfs2_cluster_disconnect(conn, 0);
2978 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
2979 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
2980 ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
2981 ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
2988 ocfs2_dlm_shutdown_debug(osb);
2990 kthread_stop(osb->dc_task);
2997 void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
3002 ocfs2_drop_osb_locks(osb);
3005 * Now that we have dropped all locks and ocfs2_dismount_volume()
3006 * has disabled recovery, the DLM won't be talking to us. It's
3007 * safe to tear things down before disconnecting the cluster.
3011 kthread_stop(osb->dc_task);
3012 osb->dc_task = NULL;
3015 ocfs2_lock_res_free(&osb->osb_super_lockres);
3016 ocfs2_lock_res_free(&osb->osb_rename_lockres);
3017 ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
3018 ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
3020 ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
3023 ocfs2_dlm_shutdown_debug(osb);
3028 static void ocfs2_unlock_ast(void *opaque, int error)
3030 struct ocfs2_lock_res *lockres = opaque;
3031 unsigned long flags;
3035 mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
3036 lockres->l_unlock_action);
3038 spin_lock_irqsave(&lockres->l_lock, flags);
3040 mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
3041 "unlock_action %d\n", error, lockres->l_name,
3042 lockres->l_unlock_action);
3043 spin_unlock_irqrestore(&lockres->l_lock, flags);
3048 switch(lockres->l_unlock_action) {
3049 case OCFS2_UNLOCK_CANCEL_CONVERT:
3050 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
3051 lockres->l_action = OCFS2_AST_INVALID;
3052 /* The downconvert thread may have requeued this lock; we
3053 * need to wake it. */
3054 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
3055 ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
3057 case OCFS2_UNLOCK_DROP_LOCK:
3058 lockres->l_level = DLM_LOCK_IV;
3064 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
3065 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
3066 wake_up(&lockres->l_event);
3067 spin_unlock_irqrestore(&lockres->l_lock, flags);
3072 static int ocfs2_drop_lock(struct ocfs2_super *osb,
3073 struct ocfs2_lock_res *lockres)
3076 unsigned long flags;
3079 /* We didn't get anywhere near actually using this lockres. */
3080 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
3083 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
3084 lkm_flags |= DLM_LKF_VALBLK;
3086 spin_lock_irqsave(&lockres->l_lock, flags);
3088 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
3089 "lockres %s, flags 0x%lx\n",
3090 lockres->l_name, lockres->l_flags);
3092 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
3093 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
3094 "%u, unlock_action = %u\n",
3095 lockres->l_name, lockres->l_flags, lockres->l_action,
3096 lockres->l_unlock_action);
3098 spin_unlock_irqrestore(&lockres->l_lock, flags);
3100 /* XXX: Today we just wait on any busy
3101 * locks... Perhaps we need to cancel converts in the
3103 ocfs2_wait_on_busy_lock(lockres);
3105 spin_lock_irqsave(&lockres->l_lock, flags);
3108 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3109 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
3110 lockres->l_level == DLM_LOCK_EX &&
3111 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3112 lockres->l_ops->set_lvb(lockres);
3115 if (lockres->l_flags & OCFS2_LOCK_BUSY)
3116 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
3118 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
3119 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
3121 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
3122 spin_unlock_irqrestore(&lockres->l_lock, flags);
3126 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
3128 /* make sure we never get here while waiting for an ast to
3130 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
3132 /* is this necessary? */
3133 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3134 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
3135 spin_unlock_irqrestore(&lockres->l_lock, flags);
3137 mlog(0, "lock %s\n", lockres->l_name);
3139 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags,
3142 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3143 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
3144 ocfs2_dlm_dump_lksb(&lockres->l_lksb);
3147 mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
3150 ocfs2_wait_on_busy_lock(lockres);
3156 /* Mark the lockres as being dropped. It will no longer be
3157 * queued if blocking, but we still may have to wait on it
3158 * being dequeued from the downconvert thread before we can consider
3161 * You can *not* attempt to call cluster_lock on this lockres anymore. */
3162 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
3165 struct ocfs2_mask_waiter mw;
3166 unsigned long flags;
3168 ocfs2_init_mask_waiter(&mw);
3170 spin_lock_irqsave(&lockres->l_lock, flags);
3171 lockres->l_flags |= OCFS2_LOCK_FREEING;
3172 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
3173 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
3174 spin_unlock_irqrestore(&lockres->l_lock, flags);
3176 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
3178 status = ocfs2_wait_for_mask(&mw);
3182 spin_lock_irqsave(&lockres->l_lock, flags);
3184 spin_unlock_irqrestore(&lockres->l_lock, flags);
3187 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
3188 struct ocfs2_lock_res *lockres)
3192 ocfs2_mark_lockres_freeing(lockres);
3193 ret = ocfs2_drop_lock(osb, lockres);
3198 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
3200 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
3201 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
3202 ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
3203 ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
3206 int ocfs2_drop_inode_locks(struct inode *inode)
3212 /* No need to call ocfs2_mark_lockres_freeing here -
3213 * ocfs2_clear_inode has done it for us. */
3215 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3216 &OCFS2_I(inode)->ip_open_lockres);
3222 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3223 &OCFS2_I(inode)->ip_inode_lockres);
3226 if (err < 0 && !status)
3229 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3230 &OCFS2_I(inode)->ip_rw_lockres);
3233 if (err < 0 && !status)
3240 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
3243 assert_spin_locked(&lockres->l_lock);
3245 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
3247 if (lockres->l_level <= new_level) {
3248 mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n",
3249 lockres->l_level, new_level);
3253 mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
3254 lockres->l_name, new_level, lockres->l_blocking);
3256 lockres->l_action = OCFS2_AST_DOWNCONVERT;
3257 lockres->l_requested = new_level;
3258 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3259 return lockres_set_pending(lockres);
3262 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3263 struct ocfs2_lock_res *lockres,
3266 unsigned int generation)
3269 u32 dlm_flags = DLM_LKF_CONVERT;
3274 dlm_flags |= DLM_LKF_VALBLK;
3276 ret = ocfs2_dlm_lock(osb->cconn,
3281 OCFS2_LOCK_ID_MAX_LEN - 1,
3283 lockres_clear_pending(lockres, generation, osb);
3285 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
3286 ocfs2_recover_from_dlm_error(lockres, 1);
3296 /* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
3297 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3298 struct ocfs2_lock_res *lockres)
3300 assert_spin_locked(&lockres->l_lock);
3303 mlog(0, "lock %s\n", lockres->l_name);
3305 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
3306 /* If we're already trying to cancel a lock conversion
3307 * then just drop the spinlock and allow the caller to
3308 * requeue this lock. */
3310 mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
3314 /* were we in a convert when we got the bast fire? */
3315 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
3316 lockres->l_action != OCFS2_AST_DOWNCONVERT);
3317 /* set things up for the unlockast to know to just
3318 * clear out the ast_action and unset busy, etc. */
3319 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
3321 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
3322 "lock %s, invalid flags: 0x%lx\n",
3323 lockres->l_name, lockres->l_flags);
3328 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3329 struct ocfs2_lock_res *lockres)
3334 mlog(0, "lock %s\n", lockres->l_name);
3336 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
3337 DLM_LKF_CANCEL, lockres);
3339 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3340 ocfs2_recover_from_dlm_error(lockres, 0);
3343 mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name);
3349 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3350 struct ocfs2_lock_res *lockres,
3351 struct ocfs2_unblock_ctl *ctl)
3353 unsigned long flags;
3362 spin_lock_irqsave(&lockres->l_lock, flags);
3364 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
3367 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
3369 * This is a *big* race. The OCFS2_LOCK_PENDING flag
3370 * exists entirely for one reason - another thread has set
3371 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
3373 * If we do ocfs2_cancel_convert() before the other thread
3374 * calls dlm_lock(), our cancel will do nothing. We will
3375 * get no ast, and we will have no way of knowing the
3376 * cancel failed. Meanwhile, the other thread will call
3377 * into dlm_lock() and wait...forever.
3379 * Why forever? Because another node has asked for the
3380 * lock first; that's why we're here in unblock_lock().
3382 * The solution is OCFS2_LOCK_PENDING. When PENDING is
3383 * set, we just requeue the unblock. Only when the other
3384 * thread has called dlm_lock() and cleared PENDING will
3385 * we then cancel their request.
3387 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
3388 * at the same time they set OCFS2_LOCK_BUSY. They must
3389 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
3391 if (lockres->l_flags & OCFS2_LOCK_PENDING)
3395 ret = ocfs2_prepare_cancel_convert(osb, lockres);
3396 spin_unlock_irqrestore(&lockres->l_lock, flags);
3398 ret = ocfs2_cancel_convert(osb, lockres);
3405 /* if we're blocking an exclusive and we have *any* holders,
3407 if ((lockres->l_blocking == DLM_LOCK_EX)
3408 && (lockres->l_ex_holders || lockres->l_ro_holders))
3411 /* If it's a PR we're blocking, then only
3412 * requeue if we've got any EX holders */
3413 if (lockres->l_blocking == DLM_LOCK_PR &&
3414 lockres->l_ex_holders)
3418 * Can we get a lock in this state if the holder counts are
3419 * zero? The metadata unblock code used to check this.
3421 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
3422 && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
3425 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
3427 if (lockres->l_ops->check_downconvert
3428 && !lockres->l_ops->check_downconvert(lockres, new_level))
3431 /* If we get here, then we know that there are no more
3432 * incompatible holders (and anyone asking for an incompatible
3433 * lock is blocked). We can now downconvert the lock */
3434 if (!lockres->l_ops->downconvert_worker)
3437 /* Some lockres types want to do a bit of work before
3438 * downconverting a lock. Allow that here. The worker function
3439 * may sleep, so we save off a copy of what we're blocking as
3440 * it may change while we're not holding the spin lock. */
3441 blocking = lockres->l_blocking;
3442 spin_unlock_irqrestore(&lockres->l_lock, flags);
3444 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
3446 if (ctl->unblock_action == UNBLOCK_STOP_POST)
3449 spin_lock_irqsave(&lockres->l_lock, flags);
3450 if (blocking != lockres->l_blocking) {
3451 /* If this changed underneath us, then we can't drop
3459 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3460 if (lockres->l_level == DLM_LOCK_EX)
3464 * We only set the lvb if the lock has been fully
3465 * refreshed - otherwise we risk setting stale
3466 * data. Otherwise, there's no need to actually clear
3467 * out the lvb here as its value is still valid.
3469 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3470 lockres->l_ops->set_lvb(lockres);
3473 gen = ocfs2_prepare_downconvert(lockres, new_level);
3474 spin_unlock_irqrestore(&lockres->l_lock, flags);
3475 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
3483 spin_unlock_irqrestore(&lockres->l_lock, flags);
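
/*
 * A minimal sketch of the BUSY/PENDING ordering described above.  The dlm
 * call itself is stubbed out as a hypothetical helper; the point is only
 * that PENDING is set together with BUSY under l_lock and cleared once
 * dlm_lock() has really been issued, so ocfs2_unblock_lock() never tries
 * to cancel a convert that has not yet reached the DLM.
 */
#if 0	/* illustration only */
static int example_issue_convert(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int new_level)
{
	unsigned long flags;
	unsigned int gen;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	gen = lockres_set_pending(lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = example_call_dlm_lock(osb, lockres, new_level); /* hypothetical */

	/* Only now may the downconvert thread try to cancel us. */
	lockres_clear_pending(lockres, gen, osb);
	return ret;
}
#endif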
3490 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
3493 struct inode *inode;
3494 struct address_space *mapping;
3496 inode = ocfs2_lock_res_inode(lockres);
3497 mapping = inode->i_mapping;
3499 if (!S_ISREG(inode->i_mode))
3503 * We need this before the filemap_fdatawrite() so that it can
3504 * transfer the dirty bit from the PTE to the
3505 * page. Unfortunately this means that even for EX->PR
3506 * downconverts, we'll lose our mappings and have to build
3509 unmap_mapping_range(mapping, 0, 0, 0);
3511 if (filemap_fdatawrite(mapping)) {
3512 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
3513 (unsigned long long)OCFS2_I(inode)->ip_blkno);
3515 sync_mapping_buffers(mapping);
3516 if (blocking == DLM_LOCK_EX) {
3517 truncate_inode_pages(mapping, 0);
3519 /* We only need to wait on the I/O if we're not also
3520 * truncating pages because truncate_inode_pages waits
3521 * for us above. We don't truncate pages if we're
3522 * blocking anything < EXMODE because we want to keep
3523 * them around in that case. */
3524 filemap_fdatawait(mapping);
3528 return UNBLOCK_CONTINUE;
3531 static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
3532 struct ocfs2_lock_res *lockres,
3535 int checkpointed = ocfs2_ci_fully_checkpointed(ci);
3537 BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
3538 BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
3543 ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
3547 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3550 struct inode *inode = ocfs2_lock_res_inode(lockres);
3552 return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
3555 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
3557 struct inode *inode = ocfs2_lock_res_inode(lockres);
3559 __ocfs2_stuff_meta_lvb(inode);
3563 * Does the final reference drop on our dentry lock. Right now this
3564 * happens in the downconvert thread, but we could choose to simplify the
3565 * dlmglue API and push these off to the ocfs2_wq in the future.
3567 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3568 struct ocfs2_lock_res *lockres)
3570 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3571 ocfs2_dentry_lock_put(osb, dl);
3575 * d_delete() matching dentries before the lock downconvert.
3577 * At this point, any process waiting to destroy the
3578 * dentry_lock due to the final reference drop is stopped by the
3579 * OCFS2_LOCK_QUEUED flag.
3581 * We have two potential problems
3583 * 1) If we do the last reference drop on our dentry_lock (via dput)
3584 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
3585 * the downconvert to finish. Instead we take an elevated
3586 * reference and push the drop until after we've completed our
3587 * unblock processing.
3589 * 2) There might be another process with a final reference,
3590 * waiting on us to finish processing. If this is the case, we
3591 * detect it and exit out - there are no more dentries anyway.
3593 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
3596 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3597 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
3598 struct dentry *dentry;
3599 unsigned long flags;
3603 * This node is blocking another node from getting a read
3604 * lock. This happens when we've renamed within a
3605 * directory. We've forced the other nodes to d_delete(), but
3606 * we never actually dropped our lock because it's still
3607 * valid. The downconvert code will retain a PR for this node,
3608 * so there's no further work to do.
3610 if (blocking == DLM_LOCK_PR)
3611 return UNBLOCK_CONTINUE;
3614 * Mark this inode as potentially orphaned. The code in
3615 * ocfs2_delete_inode() will figure out whether it actually
3616 * needs to be freed or not.
3618 spin_lock(&oi->ip_lock);
3619 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
3620 spin_unlock(&oi->ip_lock);
3623 * Yuck. We need to make sure however that the check of
3624 * OCFS2_LOCK_FREEING and the extra reference are atomic with
3625 * respect to a reference decrement or the setting of that
3628 spin_lock_irqsave(&lockres->l_lock, flags);
3629 spin_lock(&dentry_attach_lock);
3630 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
3635 spin_unlock(&dentry_attach_lock);
3636 spin_unlock_irqrestore(&lockres->l_lock, flags);
3638 mlog(0, "extra_ref = %d\n", extra_ref);
3641 * We have a process waiting on us in ocfs2_dentry_iput(),
3642 * which means we can't have any more outstanding
3643 * aliases. There's no need to do any more work.
3646 return UNBLOCK_CONTINUE;
3648 spin_lock(&dentry_attach_lock);
3650 dentry = ocfs2_find_local_alias(dl->dl_inode,
3651 dl->dl_parent_blkno, 1);
3654 spin_unlock(&dentry_attach_lock);
3656 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
3657 dentry->d_name.name);
3660 * The following dcache calls may do an
3661 * iput(). Normally we don't want that from the
3662 * downconverting thread, but in this case it's ok
3663 * because the requesting node already has an
3664 * exclusive lock on the inode, so it can't be queued
3665 * for a downconvert.
3670 spin_lock(&dentry_attach_lock);
3672 spin_unlock(&dentry_attach_lock);
3675 * If we are the last holder of this dentry lock, there is no
3676 * reason to downconvert so skip straight to the unlock.
3678 if (dl->dl_count == 1)
3679 return UNBLOCK_STOP_POST;
3681 return UNBLOCK_CONTINUE_POST;
3684 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
3687 struct ocfs2_refcount_tree *tree =
3688 ocfs2_lock_res_refcount_tree(lockres);
3690 return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
3693 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
3696 struct ocfs2_refcount_tree *tree =
3697 ocfs2_lock_res_refcount_tree(lockres);
3699 ocfs2_metadata_cache_purge(&tree->rf_ci);
3701 return UNBLOCK_CONTINUE;
3704 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
3706 struct ocfs2_qinfo_lvb *lvb;
3707 struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
3708 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3709 oinfo->dqi_gi.dqi_type);
3713 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3714 lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
3715 lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
3716 lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
3717 lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
3718 lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
3719 lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
3720 lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
3725 void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3727 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3728 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3729 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3732 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
3733 ocfs2_cluster_unlock(osb, lockres, level);
3737 static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
3739 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3740 oinfo->dqi_gi.dqi_type);
3741 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3742 struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3743 struct buffer_head *bh = NULL;
3744 struct ocfs2_global_disk_dqinfo *gdinfo;
3747 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
3748 lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
3749 info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
3750 info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
3751 oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
3752 oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
3753 oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
3754 oinfo->dqi_gi.dqi_free_entry =
3755 be32_to_cpu(lvb->lvb_free_entry);
3757 status = ocfs2_read_quota_block(oinfo->dqi_gqinode, 0, &bh);
3762 gdinfo = (struct ocfs2_global_disk_dqinfo *)
3763 (bh->b_data + OCFS2_GLOBAL_INFO_OFF);
3764 info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
3765 info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
3766 oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
3767 oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
3768 oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
3769 oinfo->dqi_gi.dqi_free_entry =
3770 le32_to_cpu(gdinfo->dqi_free_entry);
3772 ocfs2_track_lock_refresh(lockres);
3779 /* Lock quota info; this function expects at least a shared lock on the quota file
3780 * so that we can safely refresh quota info from disk. */
3781 int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3783 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3784 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3785 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3790 /* On RO devices, locking really isn't needed... */
3791 if (ocfs2_is_hard_readonly(osb)) {
3796 if (ocfs2_mount_local(osb))
3799 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3804 if (!ocfs2_should_refresh_lock_res(lockres))
3806 /* OK, we have the lock but we need to refresh the quota info */
3807 status = ocfs2_refresh_qinfo(oinfo);
3809 ocfs2_qinfo_unlock(oinfo, ex);
3810 ocfs2_complete_lock_res_refresh(lockres, status);
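
/*
 * A minimal sketch of the expected shape of a caller of
 * ocfs2_qinfo_lock()/ocfs2_qinfo_unlock().  The precondition noted above,
 * that the caller already holds at least a shared cluster lock on the
 * global quota file, is assumed rather than shown.
 */
#if 0	/* illustration only */
static int example_with_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo)
{
	int status;

	status = ocfs2_qinfo_lock(oinfo, 0);
	if (status < 0)
		return status;

	/* ... the global quota info fields are now up to date ... */

	ocfs2_qinfo_unlock(oinfo, 0);
	return 0;
}
#endif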
3816 int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
3819 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3820 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
3821 struct ocfs2_super *osb = lockres->l_priv;
3824 if (ocfs2_is_hard_readonly(osb))
3827 if (ocfs2_mount_local(osb))
3830 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3837 void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
3839 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3840 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
3841 struct ocfs2_super *osb = lockres->l_priv;
3843 if (!ocfs2_mount_local(osb))
3844 ocfs2_cluster_unlock(osb, lockres, level);
3848 * This is the filesystem locking protocol. It provides the lock handling
3849 * hooks for the underlying DLM. It has a maximum version number.
3850 * The version number allows interoperability with systems running at
3851 * the same major number and an equal or smaller minor number.
3853 * Whenever the filesystem does new things with locks (adds or removes a
3854 * lock, orders them differently, does different things underneath a lock),
3855 * the version must be changed. The protocol is negotiated when joining
3856 * the dlm domain. A node may join the domain if its major version is
3857 * identical to all other nodes and its minor version is greater than
3858 * or equal to all other nodes. When its minor version is greater than
3859 * the other nodes, it will run at the minor version specified by the
3862 * If a locking change is made that will not be compatible with older
3863 * versions, the major number must be increased and the minor version set
3864 * to zero. If a change merely adds a behavior that can be disabled when
3865 * speaking to older versions, the minor version must be increased. If a
3866 * change adds a fully backwards compatible change (eg, LVB changes that
3867 * are just ignored by older versions), the version does not need to be
3870 static struct ocfs2_locking_protocol lproto = {
3872 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
3873 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
3875 .lp_lock_ast = ocfs2_locking_ast,
3876 .lp_blocking_ast = ocfs2_blocking_ast,
3877 .lp_unlock_ast = ocfs2_unlock_ast,
3880 void ocfs2_set_locking_protocol(void)
3882 ocfs2_stack_glue_set_locking_protocol(&lproto);
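
/*
 * A minimal sketch of the join rule spelled out above: identical majors
 * are required, a joining node's minor must be at least the domain's, and
 * the domain keeps running at its (smaller or equal) minor.  The function
 * and error code below are made up purely for illustration.
 */
#if 0	/* illustration only */
static int example_negotiate_lock_protocol(u8 our_major, u8 our_minor,
					   u8 domain_major, u8 domain_minor,
					   u8 *running_minor)
{
	if (our_major != domain_major)
		return -EPROTO;		/* incompatible locking behaviour */
	if (our_minor < domain_minor)
		return -EPROTO;		/* we lack behaviour the domain uses */

	/* We may be newer; run at the domain's minor version. */
	*running_minor = domain_minor;
	return 0;
}
#endif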
3886 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3887 struct ocfs2_lock_res *lockres)
3890 struct ocfs2_unblock_ctl ctl = {0, 0,};
3891 unsigned long flags;
3893 /* Our reference to the lockres in this function can be
3894 * considered valid until we remove the OCFS2_LOCK_QUEUED
3900 BUG_ON(!lockres->l_ops);
3902 mlog(0, "lockres %s blocked.\n", lockres->l_name);
3904 /* Detect whether a lock has been marked as going away while
3905 * the downconvert thread was processing other things. A lock can
3906 * still be marked with OCFS2_LOCK_FREEING after this check,
3907 * but short circuiting here will still save us some
3909 spin_lock_irqsave(&lockres->l_lock, flags);
3910 if (lockres->l_flags & OCFS2_LOCK_FREEING)
3912 spin_unlock_irqrestore(&lockres->l_lock, flags);
3914 status = ocfs2_unblock_lock(osb, lockres, &ctl);
3918 spin_lock_irqsave(&lockres->l_lock, flags);
3920 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
3921 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
3923 ocfs2_schedule_blocked_lock(osb, lockres);
3925 mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
3926 ctl.requeue ? "yes" : "no");
3927 spin_unlock_irqrestore(&lockres->l_lock, flags);
3929 if (ctl.unblock_action != UNBLOCK_CONTINUE
3930 && lockres->l_ops->post_unlock)
3931 lockres->l_ops->post_unlock(osb, lockres);
3936 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3937 struct ocfs2_lock_res *lockres)
3941 assert_spin_locked(&lockres->l_lock);
3943 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
3944 /* Do not schedule a lock for downconvert when it's on
3945 * the way to destruction - any nodes wanting access
3946 * to the resource will get it soon. */
3947 mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
3948 lockres->l_name, lockres->l_flags);
3952 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
3954 spin_lock(&osb->dc_task_lock);
3955 if (list_empty(&lockres->l_blocked_list)) {
3956 list_add_tail(&lockres->l_blocked_list,
3957 &osb->blocked_lock_list);
3958 osb->blocked_lock_count++;
3960 spin_unlock(&osb->dc_task_lock);
3965 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3967 unsigned long processed;
3968 struct ocfs2_lock_res *lockres;
3972 spin_lock(&osb->dc_task_lock);
3973 /* grab this early so we know to try again if a state change and
3974 * wake happens part-way through our work */
3975 osb->dc_work_sequence = osb->dc_wake_sequence;
3977 processed = osb->blocked_lock_count;
3979 BUG_ON(list_empty(&osb->blocked_lock_list));
3981 lockres = list_entry(osb->blocked_lock_list.next,
3982 struct ocfs2_lock_res, l_blocked_list);
3983 list_del_init(&lockres->l_blocked_list);
3984 osb->blocked_lock_count--;
3985 spin_unlock(&osb->dc_task_lock);
3990 ocfs2_process_blocked_lock(osb, lockres);
3992 spin_lock(&osb->dc_task_lock);
3994 spin_unlock(&osb->dc_task_lock);
3999 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
4003 spin_lock(&osb->dc_task_lock);
4004 if (list_empty(&osb->blocked_lock_list))
4007 spin_unlock(&osb->dc_task_lock);
4011 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
4013 int should_wake = 0;
4015 spin_lock(&osb->dc_task_lock);
4016 if (osb->dc_work_sequence != osb->dc_wake_sequence)
4018 spin_unlock(&osb->dc_task_lock);
4023 static int ocfs2_downconvert_thread(void *arg)
4026 struct ocfs2_super *osb = arg;
4028 /* only quit once we've been asked to stop and there is no more
4030 while (!(kthread_should_stop() &&
4031 ocfs2_downconvert_thread_lists_empty(osb))) {
4033 wait_event_interruptible(osb->dc_event,
4034 ocfs2_downconvert_thread_should_wake(osb) ||
4035 kthread_should_stop());
4037 mlog(0, "downconvert_thread: awoken\n");
4039 ocfs2_downconvert_thread_do_work(osb);
4042 osb->dc_task = NULL;
4046 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
4048 spin_lock(&osb->dc_task_lock);
4049 /* make sure the downconvert thread gets a swipe at whatever changes
4050 * the caller may have made to the blocked lock state */
4051 osb->dc_wake_sequence++;
4052 spin_unlock(&osb->dc_task_lock);
4053 wake_up(&osb->dc_event);