#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
+#include <linux/pid_namespace.h>
-#include <asm/semaphore.h>
#include <asm/uaccess.h>
#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
fl->fl_fasync = NULL;
fl->fl_owner = NULL;
fl->fl_pid = 0;
+ fl->fl_nspid = NULL;
fl->fl_file = NULL;
fl->fl_flags = 0;
fl->fl_type = 0;
* Initialises the fields of the file lock which are invariant for
* free file_locks.
*/
-static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
+static void init_once(struct kmem_cache *cache, void *foo)
{
struct file_lock *lock = (struct file_lock *) foo;
/*
* Initialize a new lock from an existing file_lock structure.
*/
-static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
+void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
new->fl_owner = fl->fl_owner;
new->fl_pid = fl->fl_pid;
new->fl_ops = NULL;
new->fl_lmops = NULL;
}
+EXPORT_SYMBOL(__locks_copy_lock);
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
{
list_add(&fl->fl_link, &file_lock_list);
+ fl->fl_nspid = get_pid(task_tgid(current));
+
/* insert into file's list */
fl->fl_next = *pos;
*pos = fl;
if (fl->fl_ops && fl->fl_ops->fl_remove)
fl->fl_ops->fl_remove(fl);
+ if (fl->fl_nspid) {
+ put_pid(fl->fl_nspid);
+ fl->fl_nspid = NULL;
+ }
+
locks_wake_up_blocks(fl);
locks_free_lock(fl);
}
return (locks_conflict(caller_fl, sys_fl));
}
-static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
-{
- int result = 0;
- DECLARE_WAITQUEUE(wait, current);
-
- __set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(fl_wait, &wait);
- if (timeout == 0)
- schedule();
- else
- result = schedule_timeout(timeout);
- if (signal_pending(current))
- result = -ERESTARTSYS;
- remove_wait_queue(fl_wait, &wait);
- __set_current_state(TASK_RUNNING);
- return result;
-}
-
-static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
-{
- int result;
- locks_insert_block(blocker, waiter);
- result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
- __locks_delete_block(waiter);
- return result;
-}
-
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
if (posix_locks_conflict(fl, cfl))
break;
}
- if (cfl)
+ if (cfl) {
__locks_copy_lock(fl, cfl);
- else
+ if (cfl->fl_nspid)
+ fl->fl_pid = pid_vnr(cfl->fl_nspid);
+ } else
fl->fl_type = F_UNLCK;
unlock_kernel();
return;
}
-
EXPORT_SYMBOL(posix_test_lock);
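
/*
 * Hypothetical userspace sketch (not part of this patch): F_GETLK reports
 * the holder of a conflicting POSIX lock.  With the fl_nspid handling added
 * above, the l_pid returned here is translated with pid_vnr() into the pid
 * namespace of the process issuing the fcntl(), rather than the global pid.
 * The path and pid shown are illustrative only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	int fd = open("/tmp/lock-demo", O_RDWR | O_CREAT, 0644);
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,		/* whole file */
	};

	if (fd < 0)
		return 1;
	/* ask the kernel whether a write lock on the whole file would conflict */
	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
		printf("conflicting lock held by pid %ld\n", (long)fl.l_pid);
	else
		printf("no conflicting lock\n");
	return 0;
}
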
-/* This function tests for deadlock condition before putting a process to
- * sleep. The detection scheme is no longer recursive. Recursive was neat,
- * but dangerous - we risked stack corruption if the lock data was bad, or
- * if the recursion was too deep for any other reason.
+/*
+ * Deadlock detection:
+ *
+ * We attempt to detect deadlocks that are due purely to posix file
+ * locks.
*
- * We rely on the fact that a task can only be on one lock's wait queue
- * at a time. When we find blocked_task on a wait queue we can re-search
- * with blocked_task equal to that queue's owner, until either blocked_task
- * isn't found, or blocked_task is found on a queue owned by my_task.
+ * We assume that a task can be waiting for at most one lock at a time.
+ * So for any acquired lock, the process holding that lock may be
+ * waiting on at most one other lock. That lock in turn may be held by
+ * someone waiting for at most one other lock. Given a requested lock
+ * caller_fl which is about to wait for a conflicting lock block_fl, we
+ * follow this chain of waiters to ensure we are not about to create a
+ * cycle.
*
- * Note: the above assumption may not be true when handling lock requests
- * from a broken NFS client. But broken NFS clients have a lot more to
- * worry about than proper deadlock detection anyway... --okir
+ * Since we do this before we ever put a process to sleep on a lock, we
+ * are ensured that there is never a cycle; that is what guarantees that
+ * the while() loop in posix_locks_deadlock() eventually completes.
+ *
+ * Note: the above assumption may not be true when handling lock
+ * requests from a broken NFS client. It may also fail in the presence
+ * of tasks (such as posix threads) sharing the same open file table.
+ *
+ * To handle those cases, we just bail out after a few iterations.
*/
-static int posix_locks_deadlock(struct file_lock *caller_fl,
- struct file_lock *block_fl)
+
+#define MAX_DEADLK_ITERATIONS 10
+
+/* Find a lock that the owner of the given block_fl is blocking on. */
+static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
struct file_lock *fl;
-next_task:
- if (posix_same_owner(caller_fl, block_fl))
- return 1;
list_for_each_entry(fl, &blocked_list, fl_link) {
- if (posix_same_owner(fl, block_fl)) {
- fl = fl->fl_next;
- block_fl = fl;
- goto next_task;
- }
+ if (posix_same_owner(fl, block_fl))
+ return fl->fl_next;
+ }
+ return NULL;
+}
+
+static int posix_locks_deadlock(struct file_lock *caller_fl,
+ struct file_lock *block_fl)
+{
+ int i = 0;
+
+ while ((block_fl = what_owner_is_waiting_for(block_fl))) {
+ if (i++ > MAX_DEADLK_ITERATIONS)
+ return 0;
+ if (posix_same_owner(caller_fl, block_fl))
+ return 1;
}
return 0;
}
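
/*
 * Hypothetical userspace sketch (not from this patch) of the scenario the
 * deadlock walk above guards against: two processes lock byte ranges of the
 * same file in opposite order, then each blocks on the other's range with
 * F_SETLKW.  The second waiter's fcntl() should fail with EDEADLK instead
 * of sleeping forever.  File name and timings are illustrative; the sleeps
 * only order the steps for the demo.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int range_lock(int fd, off_t start, int cmd)
{
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = start,
		.l_len = 1,
	};
	return fcntl(fd, cmd, &fl);
}

int main(void)
{
	int fd = open("/tmp/deadlk-demo", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	ftruncate(fd, 2);

	if (fork() == 0) {		/* child: lock byte 1, then wait for byte 0 */
		range_lock(fd, 1, F_SETLK);
		sleep(1);
		range_lock(fd, 0, F_SETLKW);	/* blocks on the parent */
		_exit(0);
	}

	range_lock(fd, 0, F_SETLK);	/* parent: lock byte 0 */
	sleep(2);			/* let the child block on byte 0 */
	if (range_lock(fd, 1, F_SETLKW) < 0)	/* would close the cycle */
		printf("deadlock detected: %s\n", strerror(errno));

	wait(NULL);
	return 0;
}
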
if (!posix_locks_conflict(request, fl))
continue;
if (conflock)
- locks_copy_lock(conflock, fl);
+ __locks_copy_lock(conflock, fl);
error = -EAGAIN;
if (!(request->fl_flags & FL_SLEEP))
goto out;
if (break_time == 0)
break_time++;
}
- error = locks_block_on_timeout(flock, new_fl, break_time);
+ locks_insert_block(flock, new_fl);
+ error = wait_event_interruptible_timeout(new_fl->fl_wait,
+ !new_fl->fl_next, break_time);
+ __locks_delete_block(new_fl);
if (error >= 0) {
if (error == 0)
time_out_leases(inode);
EXPORT_SYMBOL(__break_lease);
/**
- * lease_get_mtime
+ * lease_get_mtime - get the last modified time of an inode
* @inode: the inode
* @time: pointer to a timespec which will contain the last modified time
*
* This is to force NFS clients to flush their caches for files with
* exclusive leases. The justification is that if someone has an
- * exclusive lease, then they could be modifiying it.
+ * exclusive lease, then they could be modifying it.
*/
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
lease = *flp;
- error = -EAGAIN;
- if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
- goto out;
- if ((arg == F_WRLCK)
- && ((atomic_read(&dentry->d_count) > 1)
- || (atomic_read(&inode->i_count) > 1)))
- goto out;
+ if (arg != F_UNLCK) {
+ error = -ENOMEM;
+ new_fl = locks_alloc_lock();
+ if (new_fl == NULL)
+ goto out;
- error = -ENOMEM;
- new_fl = locks_alloc_lock();
- if (new_fl == NULL)
- goto out;
+ error = -EAGAIN;
+ if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
+ goto out;
+ if ((arg == F_WRLCK)
+ && ((atomic_read(&dentry->d_count) > 1)
+ || (atomic_read(&inode->i_count) > 1)))
+ goto out;
+ }
/*
* At this point, we know that if there is an exclusive
rdlease_count++;
}
+ error = -EAGAIN;
if ((arg == F_RDLCK && (wrlease_count > 0)) ||
(arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
goto out;
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
struct file_lock fl, *flp = &fl;
- struct dentry *dentry = filp->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = filp->f_path.dentry->d_inode;
int error;
locks_init_lock(&fl);
if (error)
goto out;
- for (;;) {
- error = vfs_lock_file(filp, cmd, file_lock, NULL);
- if (error != -EAGAIN || cmd == F_SETLK)
- break;
- error = wait_event_interruptible(file_lock->fl_wait,
- !file_lock->fl_next);
- if (!error)
- continue;
+ if (filp->f_op && filp->f_op->lock != NULL)
+ error = filp->f_op->lock(filp, cmd, file_lock);
+ else {
+ for (;;) {
+ error = posix_lock_file(filp, file_lock, NULL);
+ if (error != -EAGAIN || cmd == F_SETLK)
+ break;
+ error = wait_event_interruptible(file_lock->fl_wait,
+ !file_lock->fl_next);
+ if (!error)
+ continue;
- locks_delete_block(file_lock);
- break;
+ locks_delete_block(file_lock);
+ break;
+ }
}
/*
if (error)
goto out;
- for (;;) {
- error = vfs_lock_file(filp, cmd, file_lock, NULL);
- if (error != -EAGAIN || cmd == F_SETLK64)
- break;
- error = wait_event_interruptible(file_lock->fl_wait,
- !file_lock->fl_next);
- if (!error)
- continue;
+ if (filp->f_op && filp->f_op->lock != NULL)
+ error = filp->f_op->lock(filp, cmd, file_lock);
+ else {
+ for (;;) {
+ error = posix_lock_file(filp, file_lock, NULL);
+ if (error != -EAGAIN || cmd == F_SETLK64)
+ break;
+ error = wait_event_interruptible(file_lock->fl_wait,
+ !file_lock->fl_next);
+ if (!error)
+ continue;
- locks_delete_block(file_lock);
- break;
+ locks_delete_block(file_lock);
+ break;
+ }
}
/*
EXPORT_SYMBOL_GPL(vfs_cancel_lock);
-static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx)
+#ifdef CONFIG_PROC_FS
+#include <linux/seq_file.h>
+
+static void lock_get_status(struct seq_file *f, struct file_lock *fl,
+ int id, char *pfx)
{
struct inode *inode = NULL;
+ unsigned int fl_pid;
+
+ if (fl->fl_nspid)
+ fl_pid = pid_vnr(fl->fl_nspid);
+ else
+ fl_pid = fl->fl_pid;
if (fl->fl_file != NULL)
inode = fl->fl_file->f_path.dentry->d_inode;
- out += sprintf(out, "%d:%s ", id, pfx);
+ seq_printf(f, "%d:%s ", id, pfx);
if (IS_POSIX(fl)) {
- out += sprintf(out, "%6s %s ",
+ seq_printf(f, "%6s %s ",
(fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
(inode == NULL) ? "*NOINODE*" :
mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
} else if (IS_FLOCK(fl)) {
if (fl->fl_type & LOCK_MAND) {
- out += sprintf(out, "FLOCK MSNFS ");
+ seq_printf(f, "FLOCK MSNFS ");
} else {
- out += sprintf(out, "FLOCK ADVISORY ");
+ seq_printf(f, "FLOCK ADVISORY ");
}
} else if (IS_LEASE(fl)) {
- out += sprintf(out, "LEASE ");
+ seq_printf(f, "LEASE ");
if (fl->fl_type & F_INPROGRESS)
- out += sprintf(out, "BREAKING ");
+ seq_printf(f, "BREAKING ");
else if (fl->fl_file)
- out += sprintf(out, "ACTIVE ");
+ seq_printf(f, "ACTIVE ");
else
- out += sprintf(out, "BREAKER ");
+ seq_printf(f, "BREAKER ");
} else {
- out += sprintf(out, "UNKNOWN UNKNOWN ");
+ seq_printf(f, "UNKNOWN UNKNOWN ");
}
if (fl->fl_type & LOCK_MAND) {
- out += sprintf(out, "%s ",
+ seq_printf(f, "%s ",
(fl->fl_type & LOCK_READ)
? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
: (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
} else {
- out += sprintf(out, "%s ",
+ seq_printf(f, "%s ",
(fl->fl_type & F_INPROGRESS)
? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
: (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
}
if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
- out += sprintf(out, "%d %s:%ld ", fl->fl_pid,
+ seq_printf(f, "%d %s:%ld ", fl_pid,
inode->i_sb->s_id, inode->i_ino);
#else
/* userspace relies on this representation of dev_t ;-( */
- out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid,
+ seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
} else {
- out += sprintf(out, "%d <none>:0 ", fl->fl_pid);
+ seq_printf(f, "%d <none>:0 ", fl_pid);
}
if (IS_POSIX(fl)) {
if (fl->fl_end == OFFSET_MAX)
- out += sprintf(out, "%Ld EOF\n", fl->fl_start);
+ seq_printf(f, "%Ld EOF\n", fl->fl_start);
else
- out += sprintf(out, "%Ld %Ld\n", fl->fl_start,
- fl->fl_end);
+ seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
} else {
- out += sprintf(out, "0 EOF\n");
+ seq_printf(f, "0 EOF\n");
}
}
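
/*
 * Illustration (not part of the patch): following the seq_printf() format
 * strings above, a single advisory POSIX write lock on a whole file would
 * appear in /proc/locks roughly as
 *
 *   1: POSIX  ADVISORY  WRITE 1234 08:01:123456 0 EOF
 *
 * where "1234" is fl_pid as computed above, i.e. the lock holder's pid as
 * seen from the reading process's pid namespace.  The pid, device numbers
 * and inode number here are made up for the example.
 */
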
-static void move_lock_status(char **p, off_t* pos, off_t offset)
+static int locks_show(struct seq_file *f, void *v)
{
- int len;
- len = strlen(*p);
- if(*pos >= offset) {
- /* the complete line is valid */
- *p += len;
- *pos += len;
- return;
- }
- if(*pos+len > offset) {
- /* use the second part of the line */
- int i = offset-*pos;
- memmove(*p,*p+i,len-i);
- *p += len-i;
- *pos += len;
- return;
- }
- /* discard the complete line */
- *pos += len;
-}
+ struct file_lock *fl, *bfl;
-/**
- * get_locks_status - reports lock usage in /proc/locks
- * @buffer: address in userspace to write into
- * @start: ?
- * @offset: how far we are through the buffer
- * @length: how much to read
- */
+ fl = list_entry(v, struct file_lock, fl_link);
-int get_locks_status(char *buffer, char **start, off_t offset, int length)
-{
- struct file_lock *fl;
- char *q = buffer;
- off_t pos = 0;
- int i = 0;
+ lock_get_status(f, fl, (long)f->private, "");
- lock_kernel();
- list_for_each_entry(fl, &file_lock_list, fl_link) {
- struct file_lock *bfl;
+ list_for_each_entry(bfl, &fl->fl_block, fl_block)
+ lock_get_status(f, bfl, (long)f->private, " ->");
- lock_get_status(q, fl, ++i, "");
- move_lock_status(&q, &pos, offset);
+ f->private++;
+ return 0;
+}
- if(pos >= offset+length)
- goto done;
+static void *locks_start(struct seq_file *f, loff_t *pos)
+{
+ lock_kernel();
+ f->private = (void *)1;
+ return seq_list_start(&file_lock_list, *pos);
+}
- list_for_each_entry(bfl, &fl->fl_block, fl_block) {
- lock_get_status(q, bfl, i, " ->");
- move_lock_status(&q, &pos, offset);
+static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &file_lock_list, pos);
+}
- if(pos >= offset+length)
- goto done;
- }
- }
-done:
+static void locks_stop(struct seq_file *f, void *v)
+{
unlock_kernel();
- *start = buffer;
- if(q-buffer < length)
- return (q-buffer);
- return length;
}
+struct seq_operations locks_seq_operations = {
+ .start = locks_start,
+ .next = locks_next,
+ .stop = locks_stop,
+ .show = locks_show,
+};
+#endif
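
/*
 * Sketch of the consumer side (an assumption, this hunk does not show it):
 * with locks_seq_operations defined above, /proc/locks would typically be
 * wired up from fs/proc along these lines.  locks_open(),
 * proc_locks_operations and the create_seq_entry() call are illustrative
 * names for that glue, not code introduced by this patch.
 */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

extern struct seq_operations locks_seq_operations;

static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &locks_seq_operations);
}

static const struct file_operations proc_locks_operations = {
	.open		= locks_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* registered from the proc init code, e.g.:
 *	create_seq_entry("locks", 0, &proc_locks_operations);
 */
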
+
/**
* lock_may_read - checks that the region is free of locks
* @inode: the inode that is being read