#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/mm.h>
-#include <linux/smp_lock.h>
#include <linux/crc32.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
+#include "ocfs2_lockingver.h"
#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "extent_map.h"
+#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "slot_map.h"
#include "super.h"
#include "uptodate.h"
-#include "vote.h"
#include "buffer_head_io.h"
static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
+static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
/*
- * Return value from ocfs2_convert_worker_t functions.
+ * Return value from ->downconvert_worker functions.
*
- * These control the precise actions of ocfs2_generic_unblock_lock()
+ * These control the precise actions of ocfs2_unblock_lock()
* and ocfs2_process_blocked_lock()
*
*/
enum ocfs2_unblock_action unblock_action;
};
-static int ocfs2_unblock_meta(struct ocfs2_lock_res *lockres,
- struct ocfs2_unblock_ctl *ctl);
-static int ocfs2_unblock_data(struct ocfs2_lock_res *lockres,
- struct ocfs2_unblock_ctl *ctl);
-static int ocfs2_unblock_inode_lock(struct ocfs2_lock_res *lockres,
- struct ocfs2_unblock_ctl *ctl);
-static int ocfs2_unblock_dentry_lock(struct ocfs2_lock_res *lockres,
- struct ocfs2_unblock_ctl *ctl);
-static int ocfs2_unblock_osb_lock(struct ocfs2_lock_res *lockres,
- struct ocfs2_unblock_ctl *ctl);
+static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level);
+static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
+
+static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
+ int blocking);
+
+static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
+ int blocking);
static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
+
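+/* Log the meta LVB contents along with the calling function name and line number. */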
+#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
+
+/* This aids in debugging situations where a bad LVB might be involved. */
+static void ocfs2_dump_meta_lvb_info(u64 level,
+ const char *function,
+ unsigned int line,
+ struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
+
+ mlog(level, "LVB information for %s (called from %s:%u):\n",
+ lockres->l_name, function, line);
+ mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
+ lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
+ be32_to_cpu(lvb->lvb_igeneration));
+ mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
+ (unsigned long long)be64_to_cpu(lvb->lvb_isize),
+ be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
+ be16_to_cpu(lvb->lvb_imode));
+ mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
+ "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
+ (long long)be64_to_cpu(lvb->lvb_iatime_packed),
+ (long long)be64_to_cpu(lvb->lvb_ictime_packed),
+ (long long)be64_to_cpu(lvb->lvb_imtime_packed),
+ be32_to_cpu(lvb->lvb_iattr));
+}
+
+
/*
* OCFS2 Lock Resource Operations
*
* These fine tune the behavior of the generic dlmglue locking infrastructure.
+ *
+ * The most basic of lock types can point ->l_priv to their respective
+ * struct ocfs2_super and allow the default actions to manage things.
+ *
+ * Right now, each lock type also needs to implement an init function,
+ * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
+ * should be called when the lock is no longer needed (i.e., object
+ * destruction time).
*/
struct ocfs2_lock_res_ops {
/*
* Translate an ocfs2_lock_res * into an ocfs2_super *. Define
* this callback if ->l_priv is not an ocfs2_super pointer
*/
struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
- int (*unblock)(struct ocfs2_lock_res *, struct ocfs2_unblock_ctl *);
+
+ /*
+ * Optionally called in the downconvert thread after a
+ * successful downconvert. The lockres will not be referenced
+ * after this callback is called, so it is safe to free
+ * memory, etc.
+ *
+ * The exact semantics of when this is called are controlled
+ * by ->downconvert_worker()
+ */
void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
/*
+ * Allow a lock type to add checks to determine whether it is
+ * safe to downconvert a lock. Return 0 to re-queue the
+ * downconvert at a later time, nonzero to continue.
+ *
+ * For most locks, the default checks that there are no
+ * incompatible holders are sufficient.
+ *
+ * Called with the lockres spinlock held.
+ */
+ int (*check_downconvert)(struct ocfs2_lock_res *, int);
+
+ /*
+ * Allows a lock type to populate the lock value block. This
+ * is called on downconvert, and when we drop a lock.
+ *
+ * Locks that want to use this should set LOCK_TYPE_USES_LVB
+ * in the flags field.
+ *
+ * Called with the lockres spinlock held.
+ */
+ void (*set_lvb)(struct ocfs2_lock_res *);
+
+ /*
+ * Called from the downconvert thread when it is determined
+ * that a lock will be downconverted. This is called without
+ * any locks held so the function can do work that might
+ * schedule (syncing out data, etc).
+ *
+ * This should return any one of the ocfs2_unblock_action
+ * values, depending on what it wants the thread to do.
+ */
+ int (*downconvert_worker)(struct ocfs2_lock_res *, int);
+
+ /*
* LOCK_TYPE_* flags which describe the specific requirements
* of a lock type. Descriptions of each individual flag follow.
*/
*/
#define LOCK_TYPE_REQUIRES_REFRESH 0x1
-typedef int (ocfs2_convert_worker_t)(struct ocfs2_lock_res *, int);
-static int ocfs2_generic_unblock_lock(struct ocfs2_super *osb,
- struct ocfs2_lock_res *lockres,
- struct ocfs2_unblock_ctl *ctl,
- ocfs2_convert_worker_t *worker);
+/*
+ * Indicate that a lock type makes use of the lock value block. The
+ * ->set_lvb lock type callback must be defined.
+ */
+#define LOCK_TYPE_USES_LVB 0x2
static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
.get_osb = ocfs2_get_inode_osb,
- .unblock = ocfs2_unblock_inode_lock,
.flags = 0,
};
-static struct ocfs2_lock_res_ops ocfs2_inode_meta_lops = {
+static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
.get_osb = ocfs2_get_inode_osb,
- .unblock = ocfs2_unblock_meta,
- .flags = LOCK_TYPE_REQUIRES_REFRESH,
-};
-
-static struct ocfs2_lock_res_ops ocfs2_inode_data_lops = {
- .get_osb = ocfs2_get_inode_osb,
- .unblock = ocfs2_unblock_data,
- .flags = 0,
+ .check_downconvert = ocfs2_check_meta_downconvert,
+ .set_lvb = ocfs2_set_meta_lvb,
+ .downconvert_worker = ocfs2_data_convert_worker,
+ .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};
static struct ocfs2_lock_res_ops ocfs2_super_lops = {
- .unblock = ocfs2_unblock_osb_lock,
.flags = LOCK_TYPE_REQUIRES_REFRESH,
};
static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
- .unblock = ocfs2_unblock_osb_lock,
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
.get_osb = ocfs2_get_dentry_osb,
- .unblock = ocfs2_unblock_dentry_lock,
.post_unlock = ocfs2_dentry_post_unlock,
+ .downconvert_worker = ocfs2_dentry_convert_worker,
+ .flags = 0,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
+ .get_osb = ocfs2_get_inode_osb,
.flags = 0,
};
+static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
+ .get_osb = ocfs2_get_file_osb,
+ .flags = 0,
+};
+
+/*
+ * This is the filesystem locking protocol version.
+ *
+ * Whenever the filesystem does new things with locks (adds or removes a
+ * lock, orders them differently, does different things underneath a lock),
+ * the version must be changed. The protocol is negotiated when joining
+ * the dlm domain. A node may join the domain if its major version is
+ * identical to all other nodes and its minor version is greater than
+ * or equal to all other nodes. When its minor version is greater than
+ * the other nodes, it will run at the minor version specified by the
+ * other nodes.
+ *
+ * If a locking change is made that will not be compatible with older
+ * versions, the major number must be increased and the minor version set
+ * to zero. If a change merely adds a behavior that can be disabled when
+ * speaking to older versions, the minor version must be increased. If a
+ * change is fully backwards compatible (e.g., LVB changes that
+ * are just ignored by older versions), the version does not need to be
+ * updated.
+ */
+const struct dlm_protocol_version ocfs2_locking_protocol = {
+ .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+ .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+};
+
static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
return lockres->l_type == OCFS2_LOCK_TYPE_META ||
- lockres->l_type == OCFS2_LOCK_TYPE_DATA ||
- lockres->l_type == OCFS2_LOCK_TYPE_RW;
+ lockres->l_type == OCFS2_LOCK_TYPE_RW ||
+ lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}
static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
"resource %s: %s\n", dlm_errname(_stat), _func, \
_lockres->l_name, dlm_errmsg(_stat)); \
} while (0)
-static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
- struct ocfs2_lock_res *lockres);
-static int ocfs2_meta_lock_update(struct inode *inode,
+static int ocfs2_downconvert_thread(void *arg);
+static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+static int ocfs2_inode_lock_update(struct inode *inode,
struct buffer_head **bh);
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
static inline int ocfs2_highest_compat_lock_level(int level);
-static inline int ocfs2_can_downconvert_meta_lock(struct inode *inode,
- struct ocfs2_lock_res *lockres,
- int new_level);
+static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level);
+static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int new_level,
+ int lvb);
+static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+static int ocfs2_cancel_convert(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+
static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
u64 blkno,
ops = &ocfs2_inode_rw_lops;
break;
case OCFS2_LOCK_TYPE_META:
- ops = &ocfs2_inode_meta_lops;
+ ops = &ocfs2_inode_inode_lops;
break;
- case OCFS2_LOCK_TYPE_DATA:
- ops = &ocfs2_inode_data_lops;
+ case OCFS2_LOCK_TYPE_OPEN:
+ ops = &ocfs2_inode_open_lops;
break;
default:
mlog_bug_on_msg(1, "type: %d\n", type);
return OCFS2_SB(inode->i_sb);
}
+static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_file_private *fp = lockres->l_priv;
+
+ return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
+}
+
static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{
__be64 inode_blkno_be;
&ocfs2_rename_lops, osb);
}
+void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
+ struct ocfs2_file_private *fp)
+{
+ struct inode *inode = fp->fp_file->f_mapping->host;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ ocfs2_lock_res_init_once(lockres);
+ ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
+ inode->i_generation, lockres->l_name);
+ ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
+ OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
+ fp);
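+ /* flock lockres are never cached - the blocking ast can skip
+ * queueing downconvert work for them. */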
+ lockres->l_flags |= OCFS2_LOCK_NOCACHE;
+}
+
void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
mlog_entry_void();
static void lockres_set_flags(struct ocfs2_lock_res *lockres,
unsigned long newflags)
{
- struct list_head *pos, *tmp;
- struct ocfs2_mask_waiter *mw;
+ struct ocfs2_mask_waiter *mw, *tmp;
assert_spin_locked(&lockres->l_lock);
lockres->l_flags = newflags;
- list_for_each_safe(pos, tmp, &lockres->l_mask_waiters) {
- mw = list_entry(pos, struct ocfs2_mask_waiter, mw_item);
+ list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
continue;
{
mlog_entry_void();
- BUG_ON((!lockres->l_flags & OCFS2_LOCK_BUSY));
+ BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
if (lockres->l_requested > LKM_NLMODE &&
lockres->l_name, level, lockres->l_level,
ocfs2_lock_type_string(lockres->l_type));
+ /*
+ * We can skip the bast for locks which don't enable caching -
+ * they'll be dropped at the earliest possible time anyway.
+ */
+ if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
+ return;
+
spin_lock_irqsave(&lockres->l_lock, flags);
needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
if (needs_downconvert)
wake_up(&lockres->l_event);
- ocfs2_kick_vote_thread(osb);
+ ocfs2_wake_downconvert_thread(osb);
}
static void ocfs2_locking_ast(void *opaque)
int dlm_flags)
{
int ret = 0;
- enum dlm_status status;
+ enum dlm_status status = DLM_NORMAL;
unsigned long flags;
mlog_entry_void();
}
+static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
+ struct ocfs2_lock_res *lockres)
+{
+ int ret;
+
+ ret = wait_for_completion_interruptible(&mw->mw_complete);
+ if (ret)
+ lockres_remove_mask_waiter(lockres, mw);
+ else
+ ret = mw->mw_status;
+ /* Re-arm the completion in case we want to wait on it again */
+ INIT_COMPLETION(mw->mw_complete);
+ return ret;
+}
+
static int ocfs2_cluster_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level,
ocfs2_init_mask_waiter(&mw);
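+ /* Lock types that publish an LVB need the DLM to maintain the
+ * value block across this request. */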
+ if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+ lkm_flags |= LKM_VALBLK;
+
again:
wait = 0;
goto unlock;
}
- if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
- /* lock has not been created yet. */
- spin_unlock_irqrestore(&lockres->l_lock, flags);
-
- ret = ocfs2_lock_create(osb, lockres, LKM_NLMODE, 0);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
- goto again;
- }
-
if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
!ocfs2_may_continue_on_blocked_lock(lockres, level)) {
/* is the lock is currently blocked on behalf of
mlog(ML_ERROR, "lockres %s has action %u pending\n",
lockres->l_name, lockres->l_action);
- lockres->l_action = OCFS2_AST_CONVERT;
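+ /*
+ * An unattached lockres has no lock at the DLM yet, so issue an
+ * attach request instead of converting an existing one.
+ */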
+ if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
+ lockres->l_action = OCFS2_AST_ATTACH;
+ lkm_flags &= ~LKM_CONVERT;
+ } else {
+ lockres->l_action = OCFS2_AST_CONVERT;
+ lkm_flags |= LKM_CONVERT;
+ }
+
lockres->l_requested = level;
lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
spin_unlock_irqrestore(&lockres->l_lock, flags);
status = dlmlock(osb->dlm,
level,
&lockres->l_lksb,
- lkm_flags|LKM_CONVERT|LKM_VALBLK,
+ lkm_flags,
lockres->l_name,
OCFS2_LOCK_ID_MAX_LEN - 1,
ocfs2_locking_ast,
mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
ocfs2_dec_holders(lockres, level);
- ocfs2_vote_on_unlock(osb, lockres);
+ ocfs2_downconvert_on_unlock(osb, lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
mlog_exit_void();
}
-int ocfs2_create_new_lock(struct ocfs2_super *osb,
- struct ocfs2_lock_res *lockres,
- int ex,
- int local)
+static int ocfs2_create_new_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int ex,
+ int local)
{
int level = ex ? LKM_EXMODE : LKM_PRMODE;
unsigned long flags;
* We don't want to use LKM_LOCAL on a meta data lock as they
* don't use a generation in their lock names.
*/
- ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_meta_lockres, 1, 0);
+ ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
if (ret) {
mlog_errno(ret);
goto bail;
}
- ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_data_lockres, 1, 1);
+ ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
if (ret) {
mlog_errno(ret);
goto bail;
{
int status, level;
struct ocfs2_lock_res *lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
BUG_ON(!inode);
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
+ if (ocfs2_mount_local(osb))
+ return 0;
+
lockres = &OCFS2_I(inode)->ip_rw_lockres;
level = write ? LKM_EXMODE : LKM_PRMODE;
{
int level = write ? LKM_EXMODE : LKM_PRMODE;
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog_entry_void();
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
- ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
+ if (!ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
mlog_exit_void();
}
-int ocfs2_data_lock_full(struct inode *inode,
- int write,
- int arg_flags)
+/*
+ * ocfs2_open_lock always takes a PR mode lock.
+ */
+int ocfs2_open_lock(struct inode *inode)
+{
+ int status = 0;
+ struct ocfs2_lock_res *lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ BUG_ON(!inode);
+
+ mlog_entry_void();
+
+ mlog(0, "inode %llu take PRMODE open lock\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+
+ if (ocfs2_mount_local(osb))
+ goto out;
+
+ lockres = &OCFS2_I(inode)->ip_open_lockres;
+
+ status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
+ LKM_PRMODE, 0, 0);
+ if (status < 0)
+ mlog_errno(status);
+
+out:
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_try_open_lock(struct inode *inode, int write)
{
int status = 0, level;
struct ocfs2_lock_res *lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
BUG_ON(!inode);
mlog_entry_void();
- mlog(0, "inode %llu take %s DATA lock\n",
+ mlog(0, "inode %llu try to take %s open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
- /* We'll allow faking a readonly data lock for
- * rodevices. */
- if (ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb))) {
- if (write) {
- status = -EROFS;
- mlog_errno(status);
- }
+ if (ocfs2_mount_local(osb))
goto out;
- }
- lockres = &OCFS2_I(inode)->ip_data_lockres;
+ lockres = &OCFS2_I(inode)->ip_open_lockres;
level = write ? LKM_EXMODE : LKM_PRMODE;
- status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level,
- 0, arg_flags);
- if (status < 0 && status != -EAGAIN)
- mlog_errno(status);
+ /*
+ * The file system may already be holding a PRMODE/EXMODE open lock.
+ * Since we pass LKM_NOQUEUE, the request won't block waiting on
+ * other nodes and the -EAGAIN will indicate to the caller that
+ * this inode is still in use.
+ */
+ status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
+ level, LKM_NOQUEUE, 0);
out:
mlog_exit(status);
return status;
}
-/* see ocfs2_meta_lock_with_page() */
-int ocfs2_data_lock_with_page(struct inode *inode,
- int write,
- struct page *page)
+/*
+ * ocfs2_open_unlock unlocks the PR and EX mode open locks.
+ */
+void ocfs2_open_unlock(struct inode *inode)
+{
+ struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog_entry_void();
+
+ mlog(0, "inode %llu drop open lock\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+
+ if (ocfs2_mount_local(osb))
+ goto out;
+
+ if (lockres->l_ro_holders)
+ ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
+ LKM_PRMODE);
+ if (lockres->l_ex_holders)
+ ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
+ LKM_EXMODE);
+
+out:
+ mlog_exit_void();
+}
+
+static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
+ int level)
{
int ret;
+ struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
+ unsigned long flags;
+ struct ocfs2_mask_waiter mw;
- ret = ocfs2_data_lock_full(inode, write, OCFS2_LOCK_NONBLOCK);
- if (ret == -EAGAIN) {
- unlock_page(page);
- if (ocfs2_data_lock(inode, write) == 0)
- ocfs2_data_unlock(inode, write);
- ret = AOP_TRUNCATED_PAGE;
+ ocfs2_init_mask_waiter(&mw);
+
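+ /*
+ * A busy lockres means our convert request is still in flight -
+ * keep cancelling until the lockres goes idle, then check which
+ * level it settled at.
+ */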
+retry_cancel:
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (lockres->l_flags & OCFS2_LOCK_BUSY) {
+ ret = ocfs2_prepare_cancel_convert(osb, lockres);
+ if (ret) {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ret = ocfs2_cancel_convert(osb, lockres);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ goto retry_cancel;
+ }
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ ocfs2_wait_for_mask(&mw);
+ goto retry_cancel;
}
+ ret = -ERESTARTSYS;
+ /*
+ * We may still have gotten the lock, in which case there's no
+ * point in restarting the syscall.
+ */
+ if (lockres->l_level == level)
+ ret = 0;
+
+ mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
+ lockres->l_flags, lockres->l_level, lockres->l_action);
+
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+out:
return ret;
}
-static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
- struct ocfs2_lock_res *lockres)
+/*
+ * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
+ * flock() calls. The locking approach this requires is sufficiently
+ * different from all other cluster lock types that we implement a
+ * separate path to the "low-level" dlm calls. In particular:
+ *
+ * - No optimization of lock levels is done - we take at exactly
+ * what's been requested.
+ *
+ * - No lock caching is employed. We immediately downconvert to
+ * no-lock at unlock time. This also means flock locks never go on
+ * the blocking list.
+ *
+ * - Since userspace can trivially deadlock itself with flock, we make
+ * sure to allow cancellation of a misbehaving application's flock()
+ * request.
+ *
+ * - Concurrent access to a given flock lockres is never required, so
+ * we can simplify the code by requiring the caller to guarantee
+ * serialization of dlmglue flock calls.
+ */
+int ocfs2_file_lock(struct file *file, int ex, int trylock)
+{
+ int ret, level = ex ? LKM_EXMODE : LKM_PRMODE;
+ unsigned int lkm_flags = trylock ? LKM_NOQUEUE : 0;
+ unsigned long flags;
+ struct ocfs2_file_private *fp = file->private_data;
+ struct ocfs2_lock_res *lockres = &fp->fp_flock;
+ struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
+ struct ocfs2_mask_waiter mw;
+
+ ocfs2_init_mask_waiter(&mw);
+
+ if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
+ (lockres->l_level > LKM_NLMODE)) {
+ mlog(ML_ERROR,
+ "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
+ "level: %u\n", lockres->l_name, lockres->l_flags,
+ lockres->l_level);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ /*
+ * Get the lock at NLMODE to start - that way we
+ * can cancel the upconvert request if need be.
+ */
+ ret = ocfs2_lock_create(osb, lockres, LKM_NLMODE, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_wait_for_mask(&mw);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ }
+
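+ /* The lockres is attached at NLMODE now - convert it up to the
+ * requested level. */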
+ lockres->l_action = OCFS2_AST_CONVERT;
+ lkm_flags |= LKM_CONVERT;
+ lockres->l_requested = level;
+ lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ ret = dlmlock(osb->dlm, level, &lockres->l_lksb, lkm_flags,
+ lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
+ ocfs2_locking_ast, lockres, ocfs2_blocking_ast);
+ if (ret != DLM_NORMAL) {
+ if (trylock && ret == DLM_NOTQUEUED)
+ ret = -EAGAIN;
+ else {
+ ocfs2_log_dlm_error("dlmlock", ret, lockres);
+ ret = -EINVAL;
+ }
+
+ ocfs2_recover_from_dlm_error(lockres, 1);
+ lockres_remove_mask_waiter(lockres, &mw);
+ goto out;
+ }
+
+ ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
+ if (ret == -ERESTARTSYS) {
+ /*
+ * Userspace can trivially deadlock itself with
+ * flock(). The kernel's local flock behavior is to allow the
+ * deadlock, but abort the system call if a signal is
+ * received. We follow this example, otherwise a
+ * poorly written program could sit in the kernel until
+ * reboot.
+ *
+ * Handling this is a bit more complicated for Ocfs2
+ * though. We can't exit this function with an
+ * outstanding lock request, so a cancel convert is
+ * required. We intentionally overwrite 'ret' - if the
+ * cancel fails and the lock was granted, it's easier
+ * to just bubble success back up to the user.
+ */
+ ret = ocfs2_flock_handle_signal(lockres, level);
+ }
+
+out:
+
+ mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
+ lockres->l_name, ex, trylock, ret);
+ return ret;
+}
+
+void ocfs2_file_unlock(struct file *file)
+{
+ int ret;
+ unsigned long flags;
+ struct ocfs2_file_private *fp = file->private_data;
+ struct ocfs2_lock_res *lockres = &fp->fp_flock;
+ struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
+ struct ocfs2_mask_waiter mw;
+
+ ocfs2_init_mask_waiter(&mw);
+
+ if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
+ return;
+
+ if (lockres->l_level == LKM_NLMODE)
+ return;
+
+ mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
+ lockres->l_name, lockres->l_flags, lockres->l_level,
+ lockres->l_action);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ /*
+ * Fake a blocking ast for the downconvert code.
+ */
+ lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
+ lockres->l_blocking = LKM_EXMODE;
+
+ ocfs2_prepare_downconvert(lockres, LKM_NLMODE);
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ ret = ocfs2_downconvert_lock(osb, lockres, LKM_NLMODE, 0);
+ if (ret) {
+ mlog_errno(ret);
+ return;
+ }
+
+ ret = ocfs2_wait_for_mask(&mw);
+ if (ret)
+ mlog_errno(ret);
+}
+
+static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
{
int kick = 0;
mlog_entry_void();
/* If we know that another node is waiting on our lock, kick
- * the vote thread * pre-emptively when we reach a release
+ * the downconvert thread pre-emptively when we reach a release
* condition. */
if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
switch(lockres->l_blocking) {
}
if (kick)
- ocfs2_kick_vote_thread(osb);
-
- mlog_exit_void();
-}
-
-void ocfs2_data_unlock(struct inode *inode,
- int write)
-{
- int level = write ? LKM_EXMODE : LKM_PRMODE;
- struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_data_lockres;
-
- mlog_entry_void();
-
- mlog(0, "inode %llu drop %s DATA lock\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- write ? "EXMODE" : "PRMODE");
-
- if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)))
- ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
+ ocfs2_wake_downconvert_thread(osb);
mlog_exit_void();
}
/* Call this with the lockres locked. I am reasonably sure we don't
* need ip_lock in this function as anyone who would be changing those
- * values is supposed to be blocked in ocfs2_meta_lock right now. */
+ * values is supposed to be blocked in ocfs2_inode_lock right now. */
static void __ocfs2_stuff_meta_lvb(struct inode *inode)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
+ struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
mlog_entry_void();
lvb->lvb_imtime_packed =
cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
+ lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
out:
static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
+ struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
mlog_entry_void();
i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
+ oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
ocfs2_set_inode_flags(inode);
/* fast-symlinks are a special case */
if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
inode->i_blocks = 0;
else
- inode->i_blocks =
- ocfs2_align_bytes_to_sectors(i_size_read(inode));
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
inode->i_gid = be32_to_cpu(lvb->lvb_igid);
}
/* may or may not return a bh if it went to disk. */
-static int ocfs2_meta_lock_update(struct inode *inode,
+static int ocfs2_inode_lock_update(struct inode *inode,
struct buffer_head **bh)
{
int status = 0;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- struct ocfs2_lock_res *lockres;
+ struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_dinode *fe;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog_entry_void();
+ if (ocfs2_mount_local(osb))
+ goto bail;
+
spin_lock(&oi->ip_lock);
if (oi->ip_flags & OCFS2_INODE_DELETED) {
mlog(0, "Orphaned inode %llu was deleted while we "
}
spin_unlock(&oi->ip_lock);
- lockres = &oi->ip_meta_lockres;
-
if (!ocfs2_should_refresh_lock_res(lockres))
goto bail;
* for the inode metadata. */
ocfs2_metadata_cache_purge(inode);
- /* will do nothing for inode types that don't use the extent
- * map (directories, bitmap files, etc) */
ocfs2_extent_map_trunc(inode, 0);
if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
* returns < 0 error if the callback will never be called, otherwise
* the result of the lock will be communicated via the callback.
*/
-int ocfs2_meta_lock_full(struct inode *inode,
- struct ocfs2_journal_handle *handle,
+int ocfs2_inode_lock_full(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
int arg_flags)
{
int status, level, dlm_flags, acquired;
- struct ocfs2_lock_res *lockres;
+ struct ocfs2_lock_res *lockres = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *local_bh = NULL;
goto bail;
}
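+ /* Local (single node) mounts don't need any cluster locking. */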
+ if (ocfs2_mount_local(osb))
+ goto local;
+
if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
wait_event(osb->recovery_event,
ocfs2_node_map_is_empty(osb, &osb->recovery_map));
- acquired = 0;
- lockres = &OCFS2_I(inode)->ip_meta_lockres;
+ lockres = &OCFS2_I(inode)->ip_inode_lockres;
level = ex ? LKM_EXMODE : LKM_PRMODE;
dlm_flags = 0;
if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
wait_event(osb->recovery_event,
ocfs2_node_map_is_empty(osb, &osb->recovery_map));
+local:
/*
* We only see this flag if we're being called from
* ocfs2_read_locked_inode(). It means we're locking an inode
*/
if (inode->i_state & I_NEW) {
status = 0;
- ocfs2_complete_lock_res_refresh(lockres, 0);
+ if (lockres)
+ ocfs2_complete_lock_res_refresh(lockres, 0);
goto bail;
}
/* This is fun. The caller may want a bh back, or it may
- * not. ocfs2_meta_lock_update definitely wants one in, but
+ * not. ocfs2_inode_lock_update definitely wants one in, but
* may or may not read one, depending on what's in the
* LVB. The result of all of this is that we've *only* gone to
* disk if we have to, so the complexity is worthwhile. */
- status = ocfs2_meta_lock_update(inode, &local_bh);
+ status = ocfs2_inode_lock_update(inode, &local_bh);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
}
}
- if (handle) {
- status = ocfs2_handle_add_lock(handle, inode);
- if (status < 0)
- mlog_errno(status);
- }
-
bail:
if (status < 0) {
if (ret_bh && (*ret_bh)) {
*ret_bh = NULL;
}
if (acquired)
- ocfs2_meta_unlock(inode, ex);
+ ocfs2_inode_unlock(inode, ex);
}
if (local_bh)
}
/*
- * This is working around a lock inversion between tasks acquiring DLM locks
- * while holding a page lock and the vote thread which blocks dlm lock acquiry
- * while acquiring page locks.
+ * This is working around a lock inversion between tasks acquiring DLM
+ * locks while holding a page lock and the downconvert thread which
+ * blocks dlm lock acquiry while acquiring page locks.
*
* ** These _with_page variants are only intended to be called from aop
* methods that hold page locks and return a very specific *positive* error
* code that aop methods pass up to the VFS -- test for errors with != 0. **
*
- * The DLM is called such that it returns -EAGAIN if it would have blocked
- * waiting for the vote thread. In that case we unlock our page so the vote
- * thread can make progress. Once we've done this we have to return
- * AOP_TRUNCATED_PAGE so the aop method that called us can bubble that back up
- * into the VFS who will then immediately retry the aop call.
+ * The DLM is called such that it returns -EAGAIN if it would have
+ * blocked waiting for the downconvert thread. In that case we unlock
+ * our page so the downconvert thread can make progress. Once we've
+ * done this we have to return AOP_TRUNCATED_PAGE so the aop method
+ * that called us can bubble that back up into the VFS who will then
+ * immediately retry the aop call.
*
* We do a blocking lock and immediate unlock before returning, though, so that
* the lock has a great chance of being cached on this node by the time the VFS
* ping locks back and forth, but that's a risk we're willing to take to avoid
* the lock inversion simply.
*/
-int ocfs2_meta_lock_with_page(struct inode *inode,
- struct ocfs2_journal_handle *handle,
+int ocfs2_inode_lock_with_page(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
struct page *page)
{
int ret;
- ret = ocfs2_meta_lock_full(inode, handle, ret_bh, ex,
- OCFS2_LOCK_NONBLOCK);
+ ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
if (ret == -EAGAIN) {
unlock_page(page);
- if (ocfs2_meta_lock(inode, handle, ret_bh, ex) == 0)
- ocfs2_meta_unlock(inode, ex);
+ if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
+ ocfs2_inode_unlock(inode, ex);
ret = AOP_TRUNCATED_PAGE;
}
return ret;
}
-void ocfs2_meta_unlock(struct inode *inode,
+int ocfs2_inode_lock_atime(struct inode *inode,
+ struct vfsmount *vfsmnt,
+ int *level)
+{
+ int ret;
+
+ mlog_entry_void();
+ ret = ocfs2_inode_lock(inode, NULL, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ /*
+ * If we should update the atime, we will get an EX lock,
+ * otherwise we just keep the PR lock.
+ */
+ if (ocfs2_should_update_atime(inode, vfsmnt)) {
+ struct buffer_head *bh = NULL;
+
+ ocfs2_inode_unlock(inode, 0);
+ ret = ocfs2_inode_lock(inode, &bh, 1);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+ *level = 1;
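+ /* Re-check under the EX lock - taking it may have refreshed
+ * the inode with a newer atime from another node. */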
+ if (ocfs2_should_update_atime(inode, vfsmnt))
+ ocfs2_update_inode_atime(inode, bh);
+ if (bh)
+ brelse(bh);
+ } else
+ *level = 0;
+
+ mlog_exit(ret);
+ return ret;
+}
+
+void ocfs2_inode_unlock(struct inode *inode,
int ex)
{
int level = ex ? LKM_EXMODE : LKM_PRMODE;
- struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
+ struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog_entry_void();
(unsigned long long)OCFS2_I(inode)->ip_blkno,
ex ? "EXMODE" : "PRMODE");
- if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)))
+ if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
+ !ocfs2_mount_local(osb))
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
mlog_exit_void();
int ocfs2_super_lock(struct ocfs2_super *osb,
int ex)
{
- int status;
+ int status = 0;
int level = ex ? LKM_EXMODE : LKM_PRMODE;
struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
struct buffer_head *bh;
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
+ if (ocfs2_mount_local(osb))
+ goto bail;
+
status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
if (status < 0) {
mlog_errno(status);
int level = ex ? LKM_EXMODE : LKM_PRMODE;
struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
- ocfs2_cluster_unlock(osb, lockres, level);
+ if (!ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(osb, lockres, level);
}
int ocfs2_rename_lock(struct ocfs2_super *osb)
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
+ if (ocfs2_mount_local(osb))
+ return 0;
+
status = ocfs2_cluster_lock(osb, lockres, LKM_EXMODE, 0, 0);
if (status < 0)
mlog_errno(status);
{
struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
- ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE);
+ if (!ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE);
}
int ocfs2_dentry_lock(struct dentry *dentry, int ex)
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
+ if (ocfs2_mount_local(osb))
+ return 0;
+
ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
if (ret < 0)
mlog_errno(ret);
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
- ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
+ if (!ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
}
/* Reference counting of the dlm debug structure. We want this because
return 0;
}
-static struct seq_operations ocfs2_dlm_seq_ops = {
+static const struct seq_operations ocfs2_dlm_seq_ops = {
.start = ocfs2_dlm_seq_start,
.stop = ocfs2_dlm_seq_stop,
.next = ocfs2_dlm_seq_next,
mlog_errno(ret);
goto out;
}
- osb = (struct ocfs2_super *) inode->u.generic_ip;
+ osb = inode->i_private;
ocfs2_get_dlm_debug(osb->osb_dlm_debug);
priv->p_dlm_debug = osb->osb_dlm_debug;
INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
int ocfs2_dlm_init(struct ocfs2_super *osb)
{
- int status;
+ int status = 0;
u32 dlm_key;
- struct dlm_ctxt *dlm;
+ struct dlm_ctxt *dlm = NULL;
mlog_entry_void();
+ if (ocfs2_mount_local(osb))
+ goto local;
+
status = ocfs2_dlm_init_debug(osb);
if (status < 0) {
mlog_errno(status);
goto bail;
}
- /* launch vote thread */
- osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote");
- if (IS_ERR(osb->vote_task)) {
- status = PTR_ERR(osb->vote_task);
- osb->vote_task = NULL;
+ /* launch downconvert thread */
+ osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
+ if (IS_ERR(osb->dc_task)) {
+ status = PTR_ERR(osb->dc_task);
+ osb->dc_task = NULL;
mlog_errno(status);
goto bail;
}
dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));
/* for now, uuid == domain */
- dlm = dlm_register_domain(osb->uuid_str, dlm_key);
+ dlm = dlm_register_domain(osb->uuid_str, dlm_key,
+ &osb->osb_locking_proto);
if (IS_ERR(dlm)) {
status = PTR_ERR(dlm);
mlog_errno(status);
goto bail;
}
+ dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);
+
+local:
ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
- dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);
-
osb->dlm = dlm;
status = 0;
bail:
if (status < 0) {
ocfs2_dlm_shutdown_debug(osb);
- if (osb->vote_task)
- kthread_stop(osb->vote_task);
+ if (osb->dc_task)
+ kthread_stop(osb->dc_task);
}
mlog_exit(status);
ocfs2_drop_osb_locks(osb);
- if (osb->vote_task) {
- kthread_stop(osb->vote_task);
- osb->vote_task = NULL;
+ if (osb->dc_task) {
+ kthread_stop(osb->dc_task);
+ osb->dc_task = NULL;
}
ocfs2_lock_res_free(&osb->osb_super_lockres);
mlog_exit_void();
}
-typedef void (ocfs2_pre_drop_cb_t)(struct ocfs2_lock_res *, void *);
-
-struct drop_lock_cb {
- ocfs2_pre_drop_cb_t *drop_func;
- void *drop_data;
-};
-
static int ocfs2_drop_lock(struct ocfs2_super *osb,
- struct ocfs2_lock_res *lockres,
- struct drop_lock_cb *dcb)
+ struct ocfs2_lock_res *lockres)
{
enum dlm_status status;
unsigned long flags;
+ int lkm_flags = 0;
/* We didn't get anywhere near actually using this lockres. */
if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
goto out;
+ if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+ lkm_flags |= LKM_VALBLK;
+
spin_lock_irqsave(&lockres->l_lock, flags);
mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
spin_lock_irqsave(&lockres->l_lock, flags);
}
- if (dcb)
- dcb->drop_func(lockres, dcb->drop_data);
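+ /* Give LVB-using lock types a final chance to publish their
+ * cached values, but only if the lock is still attached at EX
+ * and those values are current. */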
+ if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
+ if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
+ lockres->l_level == LKM_EXMODE &&
+ !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
+ lockres->l_ops->set_lvb(lockres);
+ }
if (lockres->l_flags & OCFS2_LOCK_BUSY)
mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
mlog(0, "lock %s\n", lockres->l_name);
- status = dlmunlock(osb->dlm, &lockres->l_lksb, LKM_VALBLK,
+ status = dlmunlock(osb->dlm, &lockres->l_lksb, lkm_flags,
ocfs2_unlock_ast, lockres);
if (status != DLM_NORMAL) {
ocfs2_log_dlm_error("dlmunlock", status, lockres);
/* Mark the lockres as being dropped. It will no longer be
* queued if blocking, but we still may have to wait on it
- * being dequeued from the vote thread before we can consider
+ * being dequeued from the downconvert thread before we can consider
* it safe to drop.
*
* You can *not* attempt to call cluster_lock on this lockres anymore. */
int ret;
ocfs2_mark_lockres_freeing(lockres);
- ret = ocfs2_drop_lock(osb, lockres, NULL);
+ ret = ocfs2_drop_lock(osb, lockres);
if (ret)
mlog_errno(ret);
}
ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
}
-static void ocfs2_meta_pre_drop(struct ocfs2_lock_res *lockres, void *data)
-{
- struct inode *inode = data;
-
- /* the metadata lock requires a bit more work as we have an
- * LVB to worry about. */
- if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
- lockres->l_level == LKM_EXMODE &&
- !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
- __ocfs2_stuff_meta_lvb(inode);
-}
-
int ocfs2_drop_inode_locks(struct inode *inode)
{
int status, err;
- struct drop_lock_cb meta_dcb = { ocfs2_meta_pre_drop, inode, };
mlog_entry_void();
* ocfs2_clear_inode has done it for us. */
err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
- &OCFS2_I(inode)->ip_data_lockres,
- NULL);
+ &OCFS2_I(inode)->ip_open_lockres);
if (err < 0)
mlog_errno(err);
status = err;
err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
- &OCFS2_I(inode)->ip_meta_lockres,
- &meta_dcb);
+ &OCFS2_I(inode)->ip_inode_lockres);
if (err < 0)
mlog_errno(err);
if (err < 0 && !status)
status = err;
err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
- &OCFS2_I(inode)->ip_rw_lockres,
- NULL);
+ &OCFS2_I(inode)->ip_rw_lockres);
if (err < 0)
mlog_errno(err);
if (err < 0 && !status)
return ret;
}
-static inline int ocfs2_can_downconvert_meta_lock(struct inode *inode,
- struct ocfs2_lock_res *lockres,
- int new_level)
-{
- int ret;
-
- mlog_entry_void();
-
- BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE);
-
- if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
- ret = 0;
- mlog(0, "lockres %s currently being refreshed -- backing "
- "off!\n", lockres->l_name);
- } else if (new_level == LKM_PRMODE)
- ret = !lockres->l_ex_holders &&
- ocfs2_inode_fully_checkpointed(inode);
- else /* Must be NLMODE we're converting to. */
- ret = !lockres->l_ro_holders && !lockres->l_ex_holders &&
- ocfs2_inode_fully_checkpointed(inode);
-
- mlog_exit(ret);
- return ret;
-}
-
-static int ocfs2_do_unblock_meta(struct inode *inode,
- int *requeue)
-{
- int new_level;
- int set_lvb = 0;
- int ret = 0;
- struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
- unsigned long flags;
-
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-
- mlog_entry_void();
-
- spin_lock_irqsave(&lockres->l_lock, flags);
-
- BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
-
- mlog(0, "l_level=%d, l_blocking=%d\n", lockres->l_level,
- lockres->l_blocking);
-
- BUG_ON(lockres->l_level != LKM_EXMODE &&
- lockres->l_level != LKM_PRMODE);
-
- if (lockres->l_flags & OCFS2_LOCK_BUSY) {
- *requeue = 1;
- ret = ocfs2_prepare_cancel_convert(osb, lockres);
- spin_unlock_irqrestore(&lockres->l_lock, flags);
- if (ret) {
- ret = ocfs2_cancel_convert(osb, lockres);
- if (ret < 0)
- mlog_errno(ret);
- }
- goto leave;
- }
-
- new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
-
- mlog(0, "l_level=%d, l_blocking=%d, new_level=%d\n",
- lockres->l_level, lockres->l_blocking, new_level);
-
- if (ocfs2_can_downconvert_meta_lock(inode, lockres, new_level)) {
- if (lockres->l_level == LKM_EXMODE)
- set_lvb = 1;
-
- /* If the lock hasn't been refreshed yet (rare), then
- * our memory inode values are old and we skip
- * stuffing the lvb. There's no need to actually clear
- * out the lvb here as it's value is still valid. */
- if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
- if (set_lvb)
- __ocfs2_stuff_meta_lvb(inode);
- } else
- mlog(0, "lockres %s: downconverting stale lock!\n",
- lockres->l_name);
-
- mlog(0, "calling ocfs2_downconvert_lock with l_level=%d, "
- "l_blocking=%d, new_level=%d\n",
- lockres->l_level, lockres->l_blocking, new_level);
-
- ocfs2_prepare_downconvert(lockres, new_level);
- spin_unlock_irqrestore(&lockres->l_lock, flags);
- ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
- goto leave;
- }
- if (!ocfs2_inode_fully_checkpointed(inode))
- ocfs2_start_checkpoint(osb);
-
- *requeue = 1;
- spin_unlock_irqrestore(&lockres->l_lock, flags);
- ret = 0;
-leave:
- mlog_exit(ret);
- return ret;
-}
-
-static int ocfs2_generic_unblock_lock(struct ocfs2_super *osb,
- struct ocfs2_lock_res *lockres,
- struct ocfs2_unblock_ctl *ctl,
- ocfs2_convert_worker_t *worker)
+static int ocfs2_unblock_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ struct ocfs2_unblock_ctl *ctl)
{
unsigned long flags;
int blocking;
int new_level;
int ret = 0;
+ int set_lvb = 0;
mlog_entry_void();
/* if we're blocking an exclusive and we have *any* holders,
* then requeue. */
if ((lockres->l_blocking == LKM_EXMODE)
- && (lockres->l_ex_holders || lockres->l_ro_holders)) {
- spin_unlock_irqrestore(&lockres->l_lock, flags);
- ctl->requeue = 1;
- ret = 0;
- goto leave;
- }
+ && (lockres->l_ex_holders || lockres->l_ro_holders))
+ goto leave_requeue;
/* If it's a PR we're blocking, then only
* requeue if we've got any EX holders */
if (lockres->l_blocking == LKM_PRMODE &&
- lockres->l_ex_holders) {
- spin_unlock_irqrestore(&lockres->l_lock, flags);
- ctl->requeue = 1;
- ret = 0;
- goto leave;
- }
+ lockres->l_ex_holders)
+ goto leave_requeue;
+
+ /*
+ * Can we get a lock in this state if the holder counts are
+ * zero? The meta data unblock code used to check this.
+ */
+ if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
+ && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
+ goto leave_requeue;
+
+ new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
+
+ if (lockres->l_ops->check_downconvert
+ && !lockres->l_ops->check_downconvert(lockres, new_level))
+ goto leave_requeue;
/* If we get here, then we know that there are no more
* incompatible holders (and anyone asking for an incompatible
* lock is blocked). We can now downconvert the lock */
- if (!worker)
+ if (!lockres->l_ops->downconvert_worker)
goto downconvert;
/* Some lockres types want to do a bit of work before
blocking = lockres->l_blocking;
spin_unlock_irqrestore(&lockres->l_lock, flags);
- ctl->unblock_action = worker(lockres, blocking);
+ ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
if (ctl->unblock_action == UNBLOCK_STOP_POST)
goto leave;
downconvert:
ctl->requeue = 0;
- new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
+
+ if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
+ if (lockres->l_level == LKM_EXMODE)
+ set_lvb = 1;
+
+ /*
+ * We only set the lvb if the lock has been fully
+ * refreshed - otherwise we risk setting stale
+ * data. When we skip it, there's no need to actually clear
+ * out the lvb either, as its value is still valid.
+ */
+ if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
+ lockres->l_ops->set_lvb(lockres);
+ }
ocfs2_prepare_downconvert(lockres, new_level);
spin_unlock_irqrestore(&lockres->l_lock, flags);
- ret = ocfs2_downconvert_lock(osb, lockres, new_level, 0);
+ ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
leave:
mlog_exit(ret);
return ret;
+
+leave_requeue:
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ctl->requeue = 1;
+
+ mlog_exit(0);
+ return 0;
}
static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
inode = ocfs2_lock_res_inode(lockres);
mapping = inode->i_mapping;
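+ /* Only regular files keep data pages in i_mapping, so there is
+ * nothing to write back, unmap or truncate for other inodes. */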
+ if (!S_ISREG(inode->i_mode))
+ goto out;
+
+ /*
+ * We need this before the filemap_fdatawrite() so that it can
+ * transfer the dirty bit from the PTE to the
+ * page. Unfortunately this means that even for EX->PR
+ * downconverts, we'll lose our mappings and have to build
+ * them up again.
+ */
+ unmap_mapping_range(mapping, 0, 0, 0);
+
if (filemap_fdatawrite(mapping)) {
mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
sync_mapping_buffers(mapping);
if (blocking == LKM_EXMODE) {
truncate_inode_pages(mapping, 0);
- unmap_mapping_range(mapping, 0, 0, 0);
} else {
/* We only need to wait on the I/O if we're not also
* truncating pages because truncate_inode_pages waits
filemap_fdatawait(mapping);
}
+out:
return UNBLOCK_CONTINUE;
}
-int ocfs2_unblock_data(struct ocfs2_lock_res *lockres,
- struct ocfs2_unblock_ctl *ctl)
-{
- int status;
- struct inode *inode;
- struct ocfs2_super *osb;
-
- mlog_entry_void();
-
- inode = ocfs2_lock_res_inode(lockres);
- osb = OCFS2_SB(inode->i_sb);
-
- mlog(0, "unblock inode %llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
-
- status = ocfs2_generic_unblock_lock(osb, lockres, ctl,
- ocfs2_data_convert_worker);
- if (status < 0)
- mlog_errno(status);
-
- mlog(0, "inode %llu, requeue = %d\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, ctl->requeue);
-
- mlog_exit(status);
- return status;
-}
-
-static int ocfs2_unblock_inode_lock(struct ocfs2_lock_res *lockres,
- struct ocfs2_unblock_ctl *ctl)
+static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level)
{
- int status;
- struct inode *inode;
-
- mlog_entry_void();
-
- mlog(0, "Unblock lockres %s\n", lockres->l_name);
+ struct inode *inode = ocfs2_lock_res_inode(lockres);
+ int checkpointed = ocfs2_inode_fully_checkpointed(inode);
- inode = ocfs2_lock_res_inode(lockres);
+ BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE);
+ BUG_ON(lockres->l_level != LKM_EXMODE && !checkpointed);
- status = ocfs2_generic_unblock_lock(OCFS2_SB(inode->i_sb),
- lockres, ctl, NULL);
- if (status < 0)
- mlog_errno(status);
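+ /*
+ * It's safe to downconvert once the journal has checkpointed this
+ * inode's metadata. If it hasn't, kick a checkpoint and return 0
+ * so the downconvert thread requeues us.
+ */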
+ if (checkpointed)
+ return 1;
- mlog_exit(status);
- return status;
+ ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb));
+ return 0;
}
-static int ocfs2_unblock_meta(struct ocfs2_lock_res *lockres,
- struct ocfs2_unblock_ctl *ctl)
+static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
{
- int status;
- struct inode *inode;
-
- mlog_entry_void();
-
- inode = ocfs2_lock_res_inode(lockres);
-
- mlog(0, "unblock inode %llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
-
- status = ocfs2_do_unblock_meta(inode, &ctl->requeue);
- if (status < 0)
- mlog_errno(status);
-
- mlog(0, "inode %llu, requeue = %d\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, ctl->requeue);
+ struct inode *inode = ocfs2_lock_res_inode(lockres);
- mlog_exit(status);
- return status;
+ __ocfs2_stuff_meta_lvb(inode);
}
/*
* Does the final reference drop on our dentry lock. Right now this
- * happens in the vote thread, but we could choose to simplify the
+ * happens in the downconvert thread, but we could choose to simplify the
* dlmglue API and push these off to the ocfs2_wq in the future.
*/
static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
return UNBLOCK_CONTINUE_POST;
}
-static int ocfs2_unblock_dentry_lock(struct ocfs2_lock_res *lockres,
- struct ocfs2_unblock_ctl *ctl)
-{
- int ret;
- struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
- struct ocfs2_super *osb = OCFS2_SB(dl->dl_inode->i_sb);
-
- mlog(0, "unblock dentry lock: %llu\n",
- (unsigned long long)OCFS2_I(dl->dl_inode)->ip_blkno);
-
- ret = ocfs2_generic_unblock_lock(osb,
- lockres,
- ctl,
- ocfs2_dentry_convert_worker);
- if (ret < 0)
- mlog_errno(ret);
-
- mlog(0, "requeue = %d, post = %d\n", ctl->requeue, ctl->unblock_action);
-
- return ret;
-}
-
-/* Generic unblock function for any lockres whose private data is an
- * ocfs2_super pointer. */
-static int ocfs2_unblock_osb_lock(struct ocfs2_lock_res *lockres,
- struct ocfs2_unblock_ctl *ctl)
-{
- int status;
- struct ocfs2_super *osb;
-
- mlog_entry_void();
-
- mlog(0, "Unblock lockres %s\n", lockres->l_name);
-
- osb = ocfs2_get_lockres_osb(lockres);
-
- status = ocfs2_generic_unblock_lock(osb,
- lockres,
- ctl,
- NULL);
- if (status < 0)
- mlog_errno(status);
-
- mlog_exit(status);
- return status;
-}
-
-void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
- struct ocfs2_lock_res *lockres)
+static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
{
int status;
struct ocfs2_unblock_ctl ctl = {0, 0,};
BUG_ON(!lockres);
BUG_ON(!lockres->l_ops);
- BUG_ON(!lockres->l_ops->unblock);
mlog(0, "lockres %s blocked.\n", lockres->l_name);
/* Detect whether a lock has been marked as going away while
- * the vote thread was processing other things. A lock can
+ * the downconvert thread was processing other things. A lock can
* still be marked with OCFS2_LOCK_FREEING after this check,
* but short circuiting here will still save us some
* performance. */
goto unqueue;
spin_unlock_irqrestore(&lockres->l_lock, flags);
- status = lockres->l_ops->unblock(lockres, &ctl);
+ status = ocfs2_unblock_lock(osb, lockres, &ctl);
if (status < 0)
mlog_errno(status);
lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
- spin_lock(&osb->vote_task_lock);
+ spin_lock(&osb->dc_task_lock);
if (list_empty(&lockres->l_blocked_list)) {
list_add_tail(&lockres->l_blocked_list,
&osb->blocked_lock_list);
osb->blocked_lock_count++;
}
- spin_unlock(&osb->vote_task_lock);
+ spin_unlock(&osb->dc_task_lock);
mlog_exit_void();
}
-/* This aids in debugging situations where a bad LVB might be involved. */
-void ocfs2_dump_meta_lvb_info(u64 level,
- const char *function,
- unsigned int line,
- struct ocfs2_lock_res *lockres)
+static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
{
- struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
+ unsigned long processed;
+ struct ocfs2_lock_res *lockres;
- mlog(level, "LVB information for %s (called from %s:%u):\n",
- lockres->l_name, function, line);
- mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
- lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
- be32_to_cpu(lvb->lvb_igeneration));
- mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
- (unsigned long long)be64_to_cpu(lvb->lvb_isize),
- be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
- be16_to_cpu(lvb->lvb_imode));
- mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
- "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
- (long long)be64_to_cpu(lvb->lvb_iatime_packed),
- (long long)be64_to_cpu(lvb->lvb_ictime_packed),
- (long long)be64_to_cpu(lvb->lvb_imtime_packed),
- be32_to_cpu(lvb->lvb_iattr));
+ mlog_entry_void();
+
+ spin_lock(&osb->dc_task_lock);
+ /* grab this early so we know to try again if a state change and
+ * wake happen part-way through our work */
+ osb->dc_work_sequence = osb->dc_wake_sequence;
+
+ processed = osb->blocked_lock_count;
+ while (processed) {
+ BUG_ON(list_empty(&osb->blocked_lock_list));
+
+ lockres = list_entry(osb->blocked_lock_list.next,
+ struct ocfs2_lock_res, l_blocked_list);
+ list_del_init(&lockres->l_blocked_list);
+ osb->blocked_lock_count--;
+ spin_unlock(&osb->dc_task_lock);
+
+ BUG_ON(!processed);
+ processed--;
+
+ ocfs2_process_blocked_lock(osb, lockres);
+
+ spin_lock(&osb->dc_task_lock);
+ }
+ spin_unlock(&osb->dc_task_lock);
+
+ mlog_exit_void();
+}
+
+static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
+{
+ int empty = 0;
+
+ spin_lock(&osb->dc_task_lock);
+ if (list_empty(&osb->blocked_lock_list))
+ empty = 1;
+
+ spin_unlock(&osb->dc_task_lock);
+ return empty;
+}
+
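+/*
+ * A wakeup bumps dc_wake_sequence; the thread records it in
+ * dc_work_sequence at the start of each pass, so a wake that races
+ * with an in-progress pass is never lost.
+ */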
+static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
+{
+ int should_wake = 0;
+
+ spin_lock(&osb->dc_task_lock);
+ if (osb->dc_work_sequence != osb->dc_wake_sequence)
+ should_wake = 1;
+ spin_unlock(&osb->dc_task_lock);
+
+ return should_wake;
+}
+
+static int ocfs2_downconvert_thread(void *arg)
+{
+ int status = 0;
+ struct ocfs2_super *osb = arg;
+
+ /* only quit once we've been asked to stop and there is no more
+ * work available */
+ while (!(kthread_should_stop() &&
+ ocfs2_downconvert_thread_lists_empty(osb))) {
+
+ wait_event_interruptible(osb->dc_event,
+ ocfs2_downconvert_thread_should_wake(osb) ||
+ kthread_should_stop());
+
+ mlog(0, "downconvert_thread: awoken\n");
+
+ ocfs2_downconvert_thread_do_work(osb);
+ }
+
+ osb->dc_task = NULL;
+ return status;
+}
+
+void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
+{
+ spin_lock(&osb->dc_task_lock);
+ /* make sure the downconvert thread gets a swipe at whatever
+ * changes the caller may have made to the blocked lock state */
+ osb->dc_wake_sequence++;
+ spin_unlock(&osb->dc_task_lock);
+ wake_up(&osb->dc_event);
}