X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Flocks.c;h=a8794f233bc9c38d45c47f69d03652f7f6f8d533;hb=e22f628395432b967f2f505858c64450f7835365;hp=dda83d6cd48b17934797de3f6c9f65c52662f859;hpb=993dfa8776308dcfd311cf77a3bbed4aa11e9868;p=safe%2Fjmp%2Flinux-2.6 diff --git a/fs/locks.c b/fs/locks.c index dda83d6..a8794f2 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -116,6 +116,7 @@ #include #include +#include #include #include #include @@ -125,8 +126,8 @@ #include #include #include +#include -#include #include #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX) @@ -142,15 +143,15 @@ int lease_break_time = 45; static LIST_HEAD(file_lock_list); static LIST_HEAD(blocked_list); -static kmem_cache_t *filelock_cache __read_mostly; +static struct kmem_cache *filelock_cache __read_mostly; /* Allocate an empty lock structure. */ static struct file_lock *locks_alloc_lock(void) { - return kmem_cache_alloc(filelock_cache, SLAB_KERNEL); + return kmem_cache_alloc(filelock_cache, GFP_KERNEL); } -static void locks_release_private(struct file_lock *fl) +void locks_release_private(struct file_lock *fl) { if (fl->fl_ops) { if (fl->fl_ops->fl_release_private) @@ -164,6 +165,7 @@ static void locks_release_private(struct file_lock *fl) } } +EXPORT_SYMBOL_GPL(locks_release_private); /* Free a lock which is not in use. */ static void locks_free_lock(struct file_lock *fl) @@ -185,6 +187,7 @@ void locks_init_lock(struct file_lock *fl) fl->fl_fasync = NULL; fl->fl_owner = NULL; fl->fl_pid = 0; + fl->fl_nspid = NULL; fl->fl_file = NULL; fl->fl_flags = 0; fl->fl_type = 0; @@ -199,14 +202,10 @@ EXPORT_SYMBOL(locks_init_lock); * Initialises the fields of the file lock which are invariant for * free file_locks. */ -static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags) +static void init_once(void *foo) { struct file_lock *lock = (struct file_lock *) foo; - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) != - SLAB_CTOR_CONSTRUCTOR) - return; - locks_init_lock(lock); } @@ -227,7 +226,7 @@ static void locks_copy_private(struct file_lock *new, struct file_lock *fl) /* * Initialize a new lock from an existing file_lock structure. 
*/ -static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl) +void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl) { new->fl_owner = fl->fl_owner; new->fl_pid = fl->fl_pid; @@ -239,6 +238,7 @@ static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl) new->fl_ops = NULL; new->fl_lmops = NULL; } +EXPORT_SYMBOL(__locks_copy_lock); void locks_copy_lock(struct file_lock *new, struct file_lock *fl) { @@ -314,14 +314,14 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl, off_t start, end; switch (l->l_whence) { - case 0: /*SEEK_SET*/ + case SEEK_SET: start = 0; break; - case 1: /*SEEK_CUR*/ + case SEEK_CUR: start = filp->f_pos; break; - case 2: /*SEEK_END*/ - start = i_size_read(filp->f_dentry->d_inode); + case SEEK_END: + start = i_size_read(filp->f_path.dentry->d_inode); break; default: return -EINVAL; @@ -364,14 +364,14 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl, loff_t start; switch (l->l_whence) { - case 0: /*SEEK_SET*/ + case SEEK_SET: start = 0; break; - case 1: /*SEEK_CUR*/ + case SEEK_CUR: start = filp->f_pos; break; - case 2: /*SEEK_END*/ - start = i_size_read(filp->f_dentry->d_inode); + case SEEK_END: + start = i_size_read(filp->f_path.dentry->d_inode); break; default: return -EINVAL; @@ -434,7 +434,7 @@ static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try) return fl->fl_file == try->fl_file; } -static struct lock_manager_operations lease_manager_ops = { +static const struct lock_manager_operations lease_manager_ops = { .fl_break = lease_break_callback, .fl_release_private = lease_release_private_callback, .fl_mylease = lease_mylease_callback, @@ -446,15 +446,14 @@ static struct lock_manager_operations lease_manager_ops = { */ static int lease_init(struct file *filp, int type, struct file_lock *fl) { + if (assign_type(fl, type) != 0) + return -EINVAL; + fl->fl_owner = current->files; fl->fl_pid = current->tgid; fl->fl_file = filp; fl->fl_flags = FL_LEASE; - if (assign_type(fl, type) != 0) { - locks_free_lock(fl); - return -EINVAL; - } fl->fl_start = 0; fl->fl_end = OFFSET_MAX; fl->fl_ops = NULL; @@ -463,19 +462,20 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl) } /* Allocate a file_lock initialised to this type of lease */ -static int lease_alloc(struct file *filp, int type, struct file_lock **flp) +static struct file_lock *lease_alloc(struct file *filp, int type) { struct file_lock *fl = locks_alloc_lock(); - int error; + int error = -ENOMEM; if (fl == NULL) - return -ENOMEM; + return ERR_PTR(error); error = lease_init(filp, type, fl); - if (error) - return error; - *flp = fl; - return 0; + if (error) { + locks_free_lock(fl); + return ERR_PTR(error); + } + return fl; } /* Check if two locks overlap each other. 
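The SEEK_SET/SEEK_CUR/SEEK_END conversion above is the kernel half of the fcntl(2) byte-range interface: userspace describes the same range in a struct flock, and l_len == 0 extends the lock to end of file (OFFSET_MAX internally). A minimal userspace sketch of that mapping (the path is hypothetical; the API is standard fcntl(2), not part of this patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical test file */
	int fd = open("/tmp/locktest", O_RDWR | O_CREAT, 0644);
	if (fd < 0)
		return 1;

	struct flock fl = {
		.l_type   = F_WRLCK,	/* exclusive byte-range lock */
		.l_whence = SEEK_SET,	/* l_start is an absolute offset */
		.l_start  = 100,	/* kernel computes fl_start = 100 */
		.l_len    = 50,		/* ... and fl_end = 100 + 50 - 1 = 149 */
	};

	/* Non-blocking attempt; a conflicting lock yields EACCES or EAGAIN. */
	if (fcntl(fd, F_SETLK, &fl) == -1)
		perror("F_SETLK");

	close(fd);
	return 0;
}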
@@ -538,7 +538,9 @@ static void locks_insert_block(struct file_lock *blocker,
 static void locks_wake_up_blocks(struct file_lock *blocker)
 {
 	while (!list_empty(&blocker->fl_block)) {
-		struct file_lock *waiter = list_entry(blocker->fl_block.next,
+		struct file_lock *waiter;
+
+		waiter = list_first_entry(&blocker->fl_block,
 				struct file_lock, fl_block);
 		__locks_delete_block(waiter);
 		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
@@ -555,12 +557,11 @@
 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
 {
 	list_add(&fl->fl_link, &file_lock_list);
+	fl->fl_nspid = get_pid(task_tgid(current));
+
 	/* insert into file's list */
 	fl->fl_next = *pos;
 	*pos = fl;
-
-	if (fl->fl_ops && fl->fl_ops->fl_insert)
-		fl->fl_ops->fl_insert(fl);
 }
 
 /*
@@ -583,8 +584,10 @@ static void locks_delete_lock(struct file_lock **thisfl_p)
 		fl->fl_fasync = NULL;
 	}
 
-	if (fl->fl_ops && fl->fl_ops->fl_remove)
-		fl->fl_ops->fl_remove(fl);
+	if (fl->fl_nspid) {
+		put_pid(fl->fl_nspid);
+		fl->fl_nspid = NULL;
+	}
 
 	locks_wake_up_blocks(fl);
 	locks_free_lock(fl);
@@ -636,105 +639,109 @@ static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *s
 	return (locks_conflict(caller_fl, sys_fl));
 }
 
-static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
-{
-	int result = 0;
-	DECLARE_WAITQUEUE(wait, current);
-
-	__set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue(fl_wait, &wait);
-	if (timeout == 0)
-		schedule();
-	else
-		result = schedule_timeout(timeout);
-	if (signal_pending(current))
-		result = -ERESTARTSYS;
-	remove_wait_queue(fl_wait, &wait);
-	__set_current_state(TASK_RUNNING);
-	return result;
-}
-
-static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
-{
-	int result;
-	locks_insert_block(blocker, waiter);
-	result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
-	__locks_delete_block(waiter);
-	return result;
-}
-
-int
-posix_test_lock(struct file *filp, struct file_lock *fl,
-		struct file_lock *conflock)
+void
+posix_test_lock(struct file *filp, struct file_lock *fl)
 {
 	struct file_lock *cfl;
 
 	lock_kernel();
-	for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
+	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
 		if (!IS_POSIX(cfl))
 			continue;
-		if (posix_locks_conflict(cfl, fl))
+		if (posix_locks_conflict(fl, cfl))
 			break;
 	}
 	if (cfl) {
-		__locks_copy_lock(conflock, cfl);
-		unlock_kernel();
-		return 1;
-	}
+		__locks_copy_lock(fl, cfl);
+		if (cfl->fl_nspid)
+			fl->fl_pid = pid_vnr(cfl->fl_nspid);
+	} else
+		fl->fl_type = F_UNLCK;
 	unlock_kernel();
-	return 0;
+	return;
 }
-
 EXPORT_SYMBOL(posix_test_lock);
 
-/* This function tests for deadlock condition before putting a process to
- * sleep. The detection scheme is no longer recursive. Recursive was neat,
- * but dangerous - we risked stack corruption if the lock data was bad, or
- * if the recursion was too deep for any other reason.
+/*
+ * Deadlock detection:
+ *
+ * We attempt to detect deadlocks that are due purely to posix file
+ * locks.
+ *
+ * We assume that a task can be waiting for at most one lock at a time.
+ * So for any acquired lock, the process holding that lock may be
+ * waiting on at most one other lock. That lock in turn may be held by
+ * someone waiting for at most one other lock. Given a requested lock
+ * caller_fl which is about to wait for a conflicting lock block_fl, we
+ * follow this chain of waiters to ensure we are not about to create a
+ * cycle.
+ * + * Since we do this before we ever put a process to sleep on a lock, we + * are ensured that there is never a cycle; that is what guarantees that + * the while() loop in posix_locks_deadlock() eventually completes. * - * We rely on the fact that a task can only be on one lock's wait queue - * at a time. When we find blocked_task on a wait queue we can re-search - * with blocked_task equal to that queue's owner, until either blocked_task - * isn't found, or blocked_task is found on a queue owned by my_task. + * Note: the above assumption may not be true when handling lock + * requests from a broken NFS client. It may also fail in the presence + * of tasks (such as posix threads) sharing the same open file table. * - * Note: the above assumption may not be true when handling lock requests - * from a broken NFS client. But broken NFS clients have a lot more to - * worry about than proper deadlock detection anyway... --okir + * To handle those cases, we just bail out after a few iterations. */ -int posix_locks_deadlock(struct file_lock *caller_fl, + +#define MAX_DEADLK_ITERATIONS 10 + +/* Find a lock that the owner of the given block_fl is blocking on. */ +static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl) +{ + struct file_lock *fl; + + list_for_each_entry(fl, &blocked_list, fl_link) { + if (posix_same_owner(fl, block_fl)) + return fl->fl_next; + } + return NULL; +} + +static int posix_locks_deadlock(struct file_lock *caller_fl, struct file_lock *block_fl) { - struct list_head *tmp; + int i = 0; -next_task: - if (posix_same_owner(caller_fl, block_fl)) - return 1; - list_for_each(tmp, &blocked_list) { - struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); - if (posix_same_owner(fl, block_fl)) { - fl = fl->fl_next; - block_fl = fl; - goto next_task; - } + while ((block_fl = what_owner_is_waiting_for(block_fl))) { + if (i++ > MAX_DEADLK_ITERATIONS) + return 0; + if (posix_same_owner(caller_fl, block_fl)) + return 1; } return 0; } -EXPORT_SYMBOL(posix_locks_deadlock); - /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks - * at the head of the list, but that's secret knowledge known only to - * flock_lock_file and posix_lock_file. + * after any leases, but before any posix locks. + * + * Note that if called with an FL_EXISTS argument, the caller may determine + * whether or not a lock was successfully freed by testing the return + * value for -ENOENT. */ static int flock_lock_file(struct file *filp, struct file_lock *request) { struct file_lock *new_fl = NULL; struct file_lock **before; - struct inode * inode = filp->f_dentry->d_inode; + struct inode * inode = filp->f_path.dentry->d_inode; int error = 0; int found = 0; lock_kernel(); + if (request->fl_flags & FL_ACCESS) + goto find_conflict; + + if (request->fl_type != F_UNLCK) { + error = -ENOMEM; + new_fl = locks_alloc_lock(); + if (new_fl == NULL) + goto out; + error = 0; + } + for_each_lock(inode, before) { struct file_lock *fl = *before; if (IS_POSIX(fl)) @@ -750,12 +757,12 @@ static int flock_lock_file(struct file *filp, struct file_lock *request) break; } - if (request->fl_type == F_UNLCK) + if (request->fl_type == F_UNLCK) { + if ((request->fl_flags & FL_EXISTS) && !found) + error = -ENOENT; goto out; + } - new_fl = locks_alloc_lock(); - if (new_fl == NULL) - goto out; /* * If a higher-priority process was blocked on the old file lock, * give it the opportunity to lock the file. 
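The rewritten posix_test_lock() above reports its result in place (fl is overwritten with the conflicting lock, or fl_type is set to F_UNLCK), with fl_nspid/pid_vnr() translating the owner's pid into the caller's namespace; the bounded walk in posix_locks_deadlock() is likewise what turns a blocking F_SETLKW into -EDEADLK. A small compilable userspace probe of that contract (illustrative only; probe_lock is a hypothetical helper name):

#include <fcntl.h>
#include <stdio.h>

/* Ask who, if anyone, would block a write lock on the whole file. */
static void probe_lock(int fd)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* 0 means "to end of file" */
	};

	if (fcntl(fd, F_GETLK, &fl) == -1) {
		perror("F_GETLK");
		return;
	}
	if (fl.l_type == F_UNLCK)
		printf("no conflicting lock\n");
	else
		printf("conflict held by pid %ld\n", (long)fl.l_pid);
}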
@@ -763,6 +770,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	if (found)
 		cond_resched();
 
+find_conflict:
 	for_each_lock(inode, before) {
 		struct file_lock *fl = *before;
 		if (IS_POSIX(fl))
@@ -772,13 +780,18 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 		if (!flock_locks_conflict(request, fl))
 			continue;
 		error = -EAGAIN;
-		if (request->fl_flags & FL_SLEEP)
-			locks_insert_block(fl, request);
+		if (!(request->fl_flags & FL_SLEEP))
+			goto out;
+		error = FILE_LOCK_DEFERRED;
+		locks_insert_block(fl, request);
 		goto out;
 	}
+	if (request->fl_flags & FL_ACCESS)
+		goto out;
 	locks_copy_lock(new_fl, request);
-	locks_insert_lock(&inode->i_flock, new_fl);
+	locks_insert_lock(before, new_fl);
 	new_fl = NULL;
+	error = 0;
 
 out:
 	unlock_kernel();
@@ -787,10 +800,11 @@ out:
 	return error;
 }
 
-static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
+static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
 {
 	struct file_lock *fl;
-	struct file_lock *new_fl, *new_fl2;
+	struct file_lock *new_fl = NULL;
+	struct file_lock *new_fl2 = NULL;
 	struct file_lock *left = NULL;
 	struct file_lock *right = NULL;
 	struct file_lock **before;
@@ -799,27 +813,33 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
 	/*
 	 * We may need two file_lock structures for this operation,
 	 * so we get them in advance to avoid races.
+	 *
+	 * In some cases we can be sure that no new locks will be needed
 	 */
-	new_fl = locks_alloc_lock();
-	new_fl2 = locks_alloc_lock();
+	if (!(request->fl_flags & FL_ACCESS) &&
+	    (request->fl_type != F_UNLCK ||
+	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
+		new_fl = locks_alloc_lock();
+		new_fl2 = locks_alloc_lock();
+	}
 
 	lock_kernel();
 	if (request->fl_type != F_UNLCK) {
 		for_each_lock(inode, before) {
-			struct file_lock *fl = *before;
+			fl = *before;
 			if (!IS_POSIX(fl))
 				continue;
 			if (!posix_locks_conflict(request, fl))
 				continue;
 			if (conflock)
-				locks_copy_lock(conflock, fl);
+				__locks_copy_lock(conflock, fl);
 			error = -EAGAIN;
 			if (!(request->fl_flags & FL_SLEEP))
 				goto out;
 			error = -EDEADLK;
 			if (posix_locks_deadlock(request, fl))
 				goto out;
-			error = -EAGAIN;
+			error = FILE_LOCK_DEFERRED;
 			locks_insert_block(fl, request);
 			goto out;
 		}
@@ -830,14 +850,7 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
 	if (request->fl_flags & FL_ACCESS)
 		goto out;
 
-	error = -ENOLCK; /* "no luck" */
-	if (!(new_fl && new_fl2))
-		goto out;
-
 	/*
-	 * We've allocated the new locks in advance, so there are no
-	 * errors possible (and no blocking operations) from here on.
-	 *
 	 * Find the first old lock with the same owner as the new lock.
 	 */
@@ -934,10 +947,28 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
 		before = &fl->fl_next;
 	}
 
+	/*
+	 * The above code only modifies existing locks in case of
+	 * merging or replacing. If new lock(s) need to be inserted
+	 * all modifications are done below this, so it's still safe to
+	 * bail out.
+ */ + error = -ENOLCK; /* "no luck" */ + if (right && left == right && !new_fl2) + goto out; + error = 0; if (!added) { - if (request->fl_type == F_UNLCK) + if (request->fl_type == F_UNLCK) { + if (request->fl_flags & FL_EXISTS) + error = -ENOENT; goto out; + } + + if (!new_fl) { + error = -ENOLCK; + goto out; + } locks_copy_lock(new_fl, request); locks_insert_lock(before, new_fl); new_fl = NULL; @@ -975,31 +1006,22 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request * posix_lock_file - Apply a POSIX-style lock to a file * @filp: The file to apply the lock to * @fl: The lock to be applied + * @conflock: Place to return a copy of the conflicting lock, if found. * * Add a POSIX style lock to a file. * We merge adjacent & overlapping locks whenever possible. * POSIX locks are sorted by owner task, then by starting address - */ -int posix_lock_file(struct file *filp, struct file_lock *fl) -{ - return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, NULL); -} -EXPORT_SYMBOL(posix_lock_file); - -/** - * posix_lock_file_conf - Apply a POSIX-style lock to a file - * @filp: The file to apply the lock to - * @fl: The lock to be applied - * @conflock: Place to return a copy of the conflicting lock, if found. * - * Except for the conflock parameter, acts just like posix_lock_file. + * Note that if called with an FL_EXISTS argument, the caller may determine + * whether or not a lock was successfully freed by testing the return + * value for -ENOENT. */ -int posix_lock_file_conf(struct file *filp, struct file_lock *fl, +int posix_lock_file(struct file *filp, struct file_lock *fl, struct file_lock *conflock) { - return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, conflock); + return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock); } -EXPORT_SYMBOL(posix_lock_file_conf); +EXPORT_SYMBOL(posix_lock_file); /** * posix_lock_file_wait - Apply a POSIX-style lock to a file @@ -1015,8 +1037,8 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl) int error; might_sleep (); for (;;) { - error = posix_lock_file(filp, fl); - if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) + error = posix_lock_file(filp, fl, NULL); + if (error != FILE_LOCK_DEFERRED) break; error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); if (!error) @@ -1087,10 +1109,8 @@ int locks_mandatory_area(int read_write, struct inode *inode, fl.fl_end = offset + count - 1; for (;;) { - error = __posix_lock_file_conf(inode, &fl, NULL); - if (error != -EAGAIN) - break; - if (!(fl.fl_flags & FL_SLEEP)) + error = __posix_lock_file(inode, &fl, NULL); + if (error != FILE_LOCK_DEFERRED) break; error = wait_event_interruptible(fl.fl_wait, !fl.fl_next); if (!error) { @@ -1098,7 +1118,7 @@ int locks_mandatory_area(int read_write, struct inode *inode, * If we've been sleeping someone might have * changed the permissions behind our back. */ - if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) + if (__mandatory_lock(inode)) continue; } @@ -1150,9 +1170,9 @@ static void time_out_leases(struct inode *inode) * @inode: the inode of the file to return * @mode: the open mode (read or write) * - * break_lease (inlined for speed) has checked there already - * is a lease on this file. Leases are broken on a call to open() - * or truncate(). This function can sleep unless you + * break_lease (inlined for speed) has checked there already is at least + * some kind of lock (maybe a lease) on this file. Leases are broken on + * a call to open() or truncate(). 
This function can sleep unless you * specified %O_NONBLOCK to your open(). */ int __break_lease(struct inode *inode, unsigned int mode) @@ -1160,12 +1180,10 @@ int __break_lease(struct inode *inode, unsigned int mode) int error = 0, future; struct file_lock *new_fl, *flock; struct file_lock *fl; - int alloc_err; unsigned long break_time; int i_have_this_lease = 0; - alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK, - &new_fl); + new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK); lock_kernel(); @@ -1193,8 +1211,9 @@ int __break_lease(struct inode *inode, unsigned int mode) goto out; } - if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) { - error = alloc_err; + if (IS_ERR(new_fl) && !i_have_this_lease + && ((mode & O_NONBLOCK) == 0)) { + error = PTR_ERR(new_fl); goto out; } @@ -1226,7 +1245,10 @@ restart: if (break_time == 0) break_time++; } - error = locks_block_on_timeout(flock, new_fl, break_time); + locks_insert_block(flock, new_fl); + error = wait_event_interruptible_timeout(new_fl->fl_wait, + !new_fl->fl_next, break_time); + __locks_delete_block(new_fl); if (error >= 0) { if (error == 0) time_out_leases(inode); @@ -1241,7 +1263,7 @@ restart: out: unlock_kernel(); - if (!alloc_err) + if (!IS_ERR(new_fl)) locks_free_lock(new_fl); return error; } @@ -1249,13 +1271,13 @@ out: EXPORT_SYMBOL(__break_lease); /** - * lease_get_mtime + * lease_get_mtime - get the last modified time of an inode * @inode: the inode * @time: pointer to a timespec which will contain the last modified time * * This is to force NFS clients to flush their caches for files with * exclusive leases. The justification is that if someone has an - * exclusive lease, then they could be modifiying it. + * exclusive lease, then they could be modifying it. */ void lease_get_mtime(struct inode *inode, struct timespec *time) { @@ -1297,8 +1319,8 @@ int fcntl_getlease(struct file *filp) int type = F_UNLCK; lock_kernel(); - time_out_leases(filp->f_dentry->d_inode); - for (fl = filp->f_dentry->d_inode->i_flock; fl && IS_LEASE(fl); + time_out_leases(filp->f_path.dentry->d_inode); + for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl); fl = fl->fl_next) { if (fl->fl_file == filp) { type = fl->fl_type & ~F_INPROGRESS; @@ -1310,7 +1332,7 @@ int fcntl_getlease(struct file *filp) } /** - * __setlease - sets a lease on an open file + * generic_setlease - sets a lease on an open file * @filp: file pointer * @arg: type of lease to obtain * @flp: input - file_lock to use, output - file_lock inserted @@ -1320,28 +1342,42 @@ int fcntl_getlease(struct file *filp) * * Called with kernel lock held. 
 */
-static int __setlease(struct file *filp, long arg, struct file_lock **flp)
+int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 {
 	struct file_lock *fl, **before, **my_before = NULL, *lease;
-	struct dentry *dentry = filp->f_dentry;
+	struct file_lock *new_fl = NULL;
+	struct dentry *dentry = filp->f_path.dentry;
 	struct inode *inode = dentry->d_inode;
 	int error, rdlease_count = 0, wrlease_count = 0;
 
+	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
+		return -EACCES;
+	if (!S_ISREG(inode->i_mode))
+		return -EINVAL;
+	error = security_file_lock(filp, arg);
+	if (error)
+		return error;
+
 	time_out_leases(inode);
 
-	error = -EINVAL;
-	if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break)
-		goto out;
+	BUG_ON(!(*flp)->fl_lmops->fl_break);
 
 	lease = *flp;
 
-	error = -EAGAIN;
-	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
-		goto out;
-	if ((arg == F_WRLCK)
-	    && ((atomic_read(&dentry->d_count) > 1)
-		|| (atomic_read(&inode->i_count) > 1)))
-		goto out;
+	if (arg != F_UNLCK) {
+		error = -ENOMEM;
+		new_fl = locks_alloc_lock();
+		if (new_fl == NULL)
+			goto out;
+
+		error = -EAGAIN;
+		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
+			goto out;
+		if ((arg == F_WRLCK)
+		    && ((atomic_read(&dentry->d_count) > 1)
+			|| (atomic_read(&inode->i_count) > 1)))
+			goto out;
+	}
 
 	/*
 	 * At this point, we know that if there is an exclusive
@@ -1367,11 +1403,13 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
 			rdlease_count++;
 	}
 
+	error = -EAGAIN;
 	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
 	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
 		goto out;
 
 	if (my_before != NULL) {
+		*flp = *my_before;
 		error = lease->fl_lmops->fl_change(my_before, arg);
 		goto out;
 	}
@@ -1384,51 +1422,60 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
 	if (!leases_enable)
 		goto out;
 
-	error = lease_alloc(filp, arg, &fl);
-	if (error)
-		goto out;
-
-	locks_copy_lock(fl, lease);
+	locks_copy_lock(new_fl, lease);
+	locks_insert_lock(before, new_fl);
 
-	locks_insert_lock(before, fl);
+	*flp = new_fl;
+	return 0;
 
-	*flp = fl;
 out:
+	if (new_fl != NULL)
+		locks_free_lock(new_fl);
 	return error;
 }
+EXPORT_SYMBOL(generic_setlease);
 
 /**
- * setlease - sets a lease on an open file
+ * vfs_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use
 *
 * Call this to establish a lease on the file.
- * The fl_lmops fl_break function is required by break_lease
+ * The (*lease)->fl_lmops->fl_break operation must be set; if not,
+ * break_lease will oops!
+ *
+ * This will call the filesystem's setlease file method, if
+ * defined. Note that there is no getlease method; instead, the
+ * filesystem setlease method should call back to setlease() to
+ * add a lease to the inode's lease list, where fcntl_getlease() can
+ * find it. Since fcntl_getlease() only reports whether the current
+ * task holds a lease, a cluster filesystem need only do this for
+ * leases held by processes on this node.
+ *
+ * There is also no break_lease method; filesystems that
+ * handle their own leases should break leases themselves from the
+ * filesystem's open, create, and (on truncate) setattr methods.
+ *
+ * Warning: the only current setlease methods exist only to disable
+ * leases in certain cases. More vfs changes may be required to
+ * allow a full filesystem lease implementation.
*/ -int setlease(struct file *filp, long arg, struct file_lock **lease) +int vfs_setlease(struct file *filp, long arg, struct file_lock **lease) { - struct dentry *dentry = filp->f_dentry; - struct inode *inode = dentry->d_inode; int error; - if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE)) - return -EACCES; - if (!S_ISREG(inode->i_mode)) - return -EINVAL; - error = security_file_lock(filp, arg); - if (error) - return error; - lock_kernel(); - error = __setlease(filp, arg, lease); + if (filp->f_op && filp->f_op->setlease) + error = filp->f_op->setlease(filp, arg, lease); + else + error = generic_setlease(filp, arg, lease); unlock_kernel(); return error; } - -EXPORT_SYMBOL(setlease); +EXPORT_SYMBOL_GPL(vfs_setlease); /** * fcntl_setlease - sets a lease on an open file @@ -1443,18 +1490,9 @@ EXPORT_SYMBOL(setlease); int fcntl_setlease(unsigned int fd, struct file *filp, long arg) { struct file_lock fl, *flp = &fl; - struct dentry *dentry = filp->f_dentry; - struct inode *inode = dentry->d_inode; + struct inode *inode = filp->f_path.dentry->d_inode; int error; - if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE)) - return -EACCES; - if (!S_ISREG(inode->i_mode)) - return -EINVAL; - error = security_file_lock(filp, arg); - if (error) - return error; - locks_init_lock(&fl); error = lease_init(filp, arg, &fl); if (error) @@ -1462,20 +1500,20 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg) lock_kernel(); - error = __setlease(filp, arg, &flp); + error = vfs_setlease(filp, arg, &flp); if (error || arg == F_UNLCK) goto out_unlock; error = fasync_helper(fd, filp, 1, &flp->fl_fasync); if (error < 0) { - /* remove lease just inserted by __setlease */ + /* remove lease just inserted by setlease */ flp->fl_type = F_UNLCK | F_INPROGRESS; - flp->fl_break_time = jiffies- 10; + flp->fl_break_time = jiffies - 10; time_out_leases(inode); goto out_unlock; } - error = f_setown(filp, current->pid, 0); + error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); out_unlock: unlock_kernel(); return error; @@ -1494,7 +1532,7 @@ int flock_lock_file_wait(struct file *filp, struct file_lock *fl) might_sleep(); for (;;) { error = flock_lock_file(filp, fl); - if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) + if (error != FILE_LOCK_DEFERRED) break; error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); if (!error) @@ -1527,7 +1565,7 @@ EXPORT_SYMBOL(flock_lock_file_wait); * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other * processes read and write access respectively. 
 */
-asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
+SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
 {
 	struct file *filp;
 	struct file_lock *lock;
@@ -1543,7 +1581,8 @@ asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
 	cmd &= ~LOCK_NB;
 	unlock = (cmd == LOCK_UN);
 
-	if (!unlock && !(cmd & LOCK_MAND) && !(filp->f_mode & 3))
+	if (!unlock && !(cmd & LOCK_MAND) &&
+	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
 		goto out_putf;
 
 	error = flock_make_lock(filp, &lock, cmd);
@@ -1552,7 +1591,7 @@ asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
 	if (can_sleep)
 		lock->fl_flags |= FL_SLEEP;
 
-	error = security_file_lock(filp, cmd);
+	error = security_file_lock(filp, lock->fl_type);
 	if (error)
 		goto out_free;
 
@@ -1572,12 +1611,62 @@ asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
 	return error;
 }
 
+/**
+ * vfs_test_lock - test file byte range lock
+ * @filp: The file to test lock for
+ * @fl: The lock to test; also used to hold result
+ *
+ * Returns -ERRNO on failure. Indicates presence of conflicting lock by
+ * setting fl->fl_type to something other than F_UNLCK.
+ */
+int vfs_test_lock(struct file *filp, struct file_lock *fl)
+{
+	if (filp->f_op && filp->f_op->lock)
+		return filp->f_op->lock(filp, F_GETLK, fl);
+	posix_test_lock(filp, fl);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vfs_test_lock);
+
+static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
+{
+	flock->l_pid = fl->fl_pid;
+#if BITS_PER_LONG == 32
+	/*
+	 * Make sure we can represent the posix lock via
+	 * legacy 32bit flock.
+	 */
+	if (fl->fl_start > OFFT_OFFSET_MAX)
+		return -EOVERFLOW;
+	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
+		return -EOVERFLOW;
+#endif
+	flock->l_start = fl->fl_start;
+	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
+		fl->fl_end - fl->fl_start + 1;
+	flock->l_whence = 0;
+	flock->l_type = fl->fl_type;
+	return 0;
+}
+
+#if BITS_PER_LONG == 32
+static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
+{
+	flock->l_pid = fl->fl_pid;
+	flock->l_start = fl->fl_start;
+	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
+		fl->fl_end - fl->fl_start + 1;
+	flock->l_whence = 0;
+	flock->l_type = fl->fl_type;
+}
+#endif
+
 /* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
 int fcntl_getlk(struct file *filp, struct flock __user *l)
 {
-	struct file_lock *fl, cfl, file_lock;
+	struct file_lock file_lock;
 	struct flock flock;
 	int error;
 
@@ -1592,38 +1681,15 @@ int fcntl_getlk(struct file *filp, struct flock __user *l)
 	if (error)
 		goto out;
 
-	if (filp->f_op && filp->f_op->lock) {
-		error = filp->f_op->lock(filp, F_GETLK, &file_lock);
-		if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
-			file_lock.fl_ops->fl_release_private(&file_lock);
-		if (error < 0)
-			goto out;
-		else
-			fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
-	} else {
-		fl = (posix_test_lock(filp, &file_lock, &cfl) ? &cfl : NULL);
-	}
+	error = vfs_test_lock(filp, &file_lock);
+	if (error)
+		goto out;
 
-	flock.l_type = F_UNLCK;
-	if (fl != NULL) {
-		flock.l_pid = fl->fl_pid;
-#if BITS_PER_LONG == 32
-		/*
-		 * Make sure we can represent the posix lock via
-		 * legacy 32bit flock.
-		 */
-		error = -EOVERFLOW;
-		if (fl->fl_start > OFFT_OFFSET_MAX)
-			goto out;
-		if ((fl->fl_end != OFFSET_MAX)
-		    && (fl->fl_end > OFFT_OFFSET_MAX))
+	flock.l_type = file_lock.fl_type;
+	if (file_lock.fl_type != F_UNLCK) {
+		error = posix_lock_to_flock(&flock, &file_lock);
+		if (error)
 			goto out;
-#endif
-		flock.l_start = fl->fl_start;
-		flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
-			fl->fl_end - fl->fl_start + 1;
-		flock.l_whence = 0;
-		flock.l_type = fl->fl_type;
 	}
 	error = -EFAULT;
 	if (!copy_to_user(l, &flock, sizeof(flock)))
@@ -1632,6 +1698,72 @@ out:
 	return error;
 }
 
+/**
+ * vfs_lock_file - file byte range lock
+ * @filp: The file to apply the lock to
+ * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
+ * @fl: The lock to be applied
+ * @conf: Place to return a copy of the conflicting lock, if found.
+ *
+ * A caller that doesn't care about the conflicting lock may pass NULL
+ * as the final argument.
+ *
+ * If the filesystem defines a private ->lock() method, then @conf will
+ * be left unchanged; so a caller that cares should initialize it to
+ * some acceptable default.
+ *
+ * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
+ * locks, the ->lock() interface may return asynchronously, before the lock has
+ * been granted or denied by the underlying filesystem, if (and only if)
+ * fl_grant is set. Callers expecting ->lock() to return asynchronously
+ * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
+ * the request is for a blocking lock. When ->lock() does return asynchronously,
+ * it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
+ * request completes.
+ * If the request is for a non-blocking lock the file system should return
+ * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
+ * with the result. If the request timed out the callback routine will return a
+ * nonzero return code and the file system should release the lock. The file
+ * system is also responsible for keeping a corresponding posix lock when it
+ * grants a lock so the VFS can find out which locks are locally held and do
+ * the correct lock cleanup when required.
+ * The underlying filesystem must not drop the kernel lock or call
+ * ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
+ * return code.
+ */
+int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
+{
+	if (filp->f_op && filp->f_op->lock)
+		return filp->f_op->lock(filp, cmd, fl);
+	else
+		return posix_lock_file(filp, fl, conf);
+}
+EXPORT_SYMBOL_GPL(vfs_lock_file);
+
+static int do_lock_file_wait(struct file *filp, unsigned int cmd,
+			     struct file_lock *fl)
+{
+	int error;
+
+	error = security_file_lock(filp, fl->fl_type);
+	if (error)
+		return error;
+
+	for (;;) {
+		error = vfs_lock_file(filp, cmd, fl, NULL);
+		if (error != FILE_LOCK_DEFERRED)
+			break;
+		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
+		if (!error)
+			continue;
+
+		locks_delete_block(fl);
+		break;
+	}
+
+	return error;
+}
+
 /* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
*/ @@ -1641,6 +1773,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, struct file_lock *file_lock = locks_alloc_lock(); struct flock flock; struct inode *inode; + struct file *f; int error; if (file_lock == NULL) @@ -1653,14 +1786,12 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, if (copy_from_user(&flock, l, sizeof(flock))) goto out; - inode = filp->f_dentry->d_inode; + inode = filp->f_path.dentry->d_inode; /* Don't allow mandatory locks on files that may be memory mapped * and shared. */ - if (IS_MANDLOCK(inode) && - (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && - mapping_writably_mapped(filp->f_mapping)) { + if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) { error = -EAGAIN; goto out; } @@ -1690,32 +1821,21 @@ again: goto out; } - error = security_file_lock(filp, file_lock->fl_type); - if (error) - goto out; - - if (filp->f_op && filp->f_op->lock != NULL) - error = filp->f_op->lock(filp, cmd, file_lock); - else { - for (;;) { - error = posix_lock_file(filp, file_lock); - if ((error != -EAGAIN) || (cmd == F_SETLK)) - break; - error = wait_event_interruptible(file_lock->fl_wait, - !file_lock->fl_next); - if (!error) - continue; - - locks_delete_block(file_lock); - break; - } - } + error = do_lock_file_wait(filp, cmd, file_lock); /* * Attempt to detect a close/fcntl race and recover by * releasing the lock that was just acquired. */ - if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { + /* + * we need that spin_lock here - it prevents reordering between + * update of inode->i_flock and check for it done in close(). + * rcu_read_lock() wouldn't do. + */ + spin_lock(¤t->files->file_lock); + f = fcheck(fd); + spin_unlock(¤t->files->file_lock); + if (!error && f != filp && flock.l_type != F_UNLCK) { flock.l_type = F_UNLCK; goto again; } @@ -1731,7 +1851,7 @@ out: */ int fcntl_getlk64(struct file *filp, struct flock64 __user *l) { - struct file_lock *fl, cfl, file_lock; + struct file_lock file_lock; struct flock64 flock; int error; @@ -1746,27 +1866,14 @@ int fcntl_getlk64(struct file *filp, struct flock64 __user *l) if (error) goto out; - if (filp->f_op && filp->f_op->lock) { - error = filp->f_op->lock(filp, F_GETLK, &file_lock); - if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) - file_lock.fl_ops->fl_release_private(&file_lock); - if (error < 0) - goto out; - else - fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); - } else { - fl = (posix_test_lock(filp, &file_lock, &cfl) ? &cfl : NULL); - } - - flock.l_type = F_UNLCK; - if (fl != NULL) { - flock.l_pid = fl->fl_pid; - flock.l_start = fl->fl_start; - flock.l_len = fl->fl_end == OFFSET_MAX ? 
0 : - fl->fl_end - fl->fl_start + 1; - flock.l_whence = 0; - flock.l_type = fl->fl_type; - } + error = vfs_test_lock(filp, &file_lock); + if (error) + goto out; + + flock.l_type = file_lock.fl_type; + if (file_lock.fl_type != F_UNLCK) + posix_lock_to_flock64(&flock, &file_lock); + error = -EFAULT; if (!copy_to_user(l, &flock, sizeof(flock))) error = 0; @@ -1784,6 +1891,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, struct file_lock *file_lock = locks_alloc_lock(); struct flock64 flock; struct inode *inode; + struct file *f; int error; if (file_lock == NULL) @@ -1796,14 +1904,12 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, if (copy_from_user(&flock, l, sizeof(flock))) goto out; - inode = filp->f_dentry->d_inode; + inode = filp->f_path.dentry->d_inode; /* Don't allow mandatory locks on files that may be memory mapped * and shared. */ - if (IS_MANDLOCK(inode) && - (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && - mapping_writably_mapped(filp->f_mapping)) { + if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) { error = -EAGAIN; goto out; } @@ -1833,32 +1939,16 @@ again: goto out; } - error = security_file_lock(filp, file_lock->fl_type); - if (error) - goto out; - - if (filp->f_op && filp->f_op->lock != NULL) - error = filp->f_op->lock(filp, cmd, file_lock); - else { - for (;;) { - error = posix_lock_file(filp, file_lock); - if ((error != -EAGAIN) || (cmd == F_SETLK64)) - break; - error = wait_event_interruptible(file_lock->fl_wait, - !file_lock->fl_next); - if (!error) - continue; - - locks_delete_block(file_lock); - break; - } - } + error = do_lock_file_wait(filp, cmd, file_lock); /* * Attempt to detect a close/fcntl race and recover by * releasing the lock that was just acquired. */ - if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { + spin_lock(¤t->files->file_lock); + f = fcheck(fd); + spin_unlock(¤t->files->file_lock); + if (!error && f != filp && flock.l_type != F_UNLCK) { flock.l_type = F_UNLCK; goto again; } @@ -1876,19 +1966,18 @@ out: */ void locks_remove_posix(struct file *filp, fl_owner_t owner) { - struct file_lock lock, **before; + struct file_lock lock; /* * If there are no locks held on this file, we don't need to call * posix_lock_file(). Another process could be setting a lock on this * file at the same time, but we wouldn't remove that lock anyway. */ - before = &filp->f_dentry->d_inode->i_flock; - if (*before == NULL) + if (!filp->f_path.dentry->d_inode->i_flock) return; lock.fl_type = F_UNLCK; - lock.fl_flags = FL_POSIX; + lock.fl_flags = FL_POSIX | FL_CLOSE; lock.fl_start = 0; lock.fl_end = OFFSET_MAX; lock.fl_owner = owner; @@ -1897,25 +1986,8 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner) lock.fl_ops = NULL; lock.fl_lmops = NULL; - if (filp->f_op && filp->f_op->lock != NULL) { - filp->f_op->lock(filp, F_SETLK, &lock); - goto out; - } + vfs_lock_file(filp, F_SETLK, &lock, NULL); - /* Can't use posix_lock_file here; we need to remove it no matter - * which pid we have. 
- */ - lock_kernel(); - while (*before != NULL) { - struct file_lock *fl = *before; - if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) { - locks_delete_lock(before); - continue; - } - before = &fl->fl_next; - } - unlock_kernel(); -out: if (lock.fl_ops && lock.fl_ops->fl_release_private) lock.fl_ops->fl_release_private(&lock); } @@ -1927,7 +1999,7 @@ EXPORT_SYMBOL(locks_remove_posix); */ void locks_remove_flock(struct file *filp) { - struct inode * inode = filp->f_dentry->d_inode; + struct inode * inode = filp->f_path.dentry->d_inode; struct file_lock *fl; struct file_lock **before; @@ -1991,138 +2063,156 @@ posix_unblock_lock(struct file *filp, struct file_lock *waiter) EXPORT_SYMBOL(posix_unblock_lock); -static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx) +/** + * vfs_cancel_lock - file byte range unblock lock + * @filp: The file to apply the unblock to + * @fl: The lock to be unblocked + * + * Used by lock managers to cancel blocked requests + */ +int vfs_cancel_lock(struct file *filp, struct file_lock *fl) +{ + if (filp->f_op && filp->f_op->lock) + return filp->f_op->lock(filp, F_CANCELLK, fl); + return 0; +} + +EXPORT_SYMBOL_GPL(vfs_cancel_lock); + +#ifdef CONFIG_PROC_FS +#include +#include + +static void lock_get_status(struct seq_file *f, struct file_lock *fl, + int id, char *pfx) { struct inode *inode = NULL; + unsigned int fl_pid; + + if (fl->fl_nspid) + fl_pid = pid_vnr(fl->fl_nspid); + else + fl_pid = fl->fl_pid; if (fl->fl_file != NULL) - inode = fl->fl_file->f_dentry->d_inode; + inode = fl->fl_file->f_path.dentry->d_inode; - out += sprintf(out, "%d:%s ", id, pfx); + seq_printf(f, "%d:%s ", id, pfx); if (IS_POSIX(fl)) { - out += sprintf(out, "%6s %s ", + seq_printf(f, "%6s %s ", (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ", (inode == NULL) ? "*NOINODE*" : - (IS_MANDLOCK(inode) && - (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ? - "MANDATORY" : "ADVISORY "); + mandatory_lock(inode) ? "MANDATORY" : "ADVISORY "); } else if (IS_FLOCK(fl)) { if (fl->fl_type & LOCK_MAND) { - out += sprintf(out, "FLOCK MSNFS "); + seq_printf(f, "FLOCK MSNFS "); } else { - out += sprintf(out, "FLOCK ADVISORY "); + seq_printf(f, "FLOCK ADVISORY "); } } else if (IS_LEASE(fl)) { - out += sprintf(out, "LEASE "); + seq_printf(f, "LEASE "); if (fl->fl_type & F_INPROGRESS) - out += sprintf(out, "BREAKING "); + seq_printf(f, "BREAKING "); else if (fl->fl_file) - out += sprintf(out, "ACTIVE "); + seq_printf(f, "ACTIVE "); else - out += sprintf(out, "BREAKER "); + seq_printf(f, "BREAKER "); } else { - out += sprintf(out, "UNKNOWN UNKNOWN "); + seq_printf(f, "UNKNOWN UNKNOWN "); } if (fl->fl_type & LOCK_MAND) { - out += sprintf(out, "%s ", + seq_printf(f, "%s ", (fl->fl_type & LOCK_READ) ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ " : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE "); } else { - out += sprintf(out, "%s ", + seq_printf(f, "%s ", (fl->fl_type & F_INPROGRESS) ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ " : (fl->fl_type & F_WRLCK) ? 
"WRITE" : "READ "); } if (inode) { #ifdef WE_CAN_BREAK_LSLK_NOW - out += sprintf(out, "%d %s:%ld ", fl->fl_pid, + seq_printf(f, "%d %s:%ld ", fl_pid, inode->i_sb->s_id, inode->i_ino); #else /* userspace relies on this representation of dev_t ;-( */ - out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid, + seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), inode->i_ino); #endif } else { - out += sprintf(out, "%d :0 ", fl->fl_pid); + seq_printf(f, "%d :0 ", fl_pid); } if (IS_POSIX(fl)) { if (fl->fl_end == OFFSET_MAX) - out += sprintf(out, "%Ld EOF\n", fl->fl_start); + seq_printf(f, "%Ld EOF\n", fl->fl_start); else - out += sprintf(out, "%Ld %Ld\n", fl->fl_start, - fl->fl_end); + seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end); } else { - out += sprintf(out, "0 EOF\n"); + seq_printf(f, "0 EOF\n"); } } -static void move_lock_status(char **p, off_t* pos, off_t offset) +static int locks_show(struct seq_file *f, void *v) { - int len; - len = strlen(*p); - if(*pos >= offset) { - /* the complete line is valid */ - *p += len; - *pos += len; - return; - } - if(*pos+len > offset) { - /* use the second part of the line */ - int i = offset-*pos; - memmove(*p,*p+i,len-i); - *p += len-i; - *pos += len; - return; - } - /* discard the complete line */ - *pos += len; + struct file_lock *fl, *bfl; + + fl = list_entry(v, struct file_lock, fl_link); + + lock_get_status(f, fl, (long)f->private, ""); + + list_for_each_entry(bfl, &fl->fl_block, fl_block) + lock_get_status(f, bfl, (long)f->private, " ->"); + + f->private++; + return 0; } -/** - * get_locks_status - reports lock usage in /proc/locks - * @buffer: address in userspace to write into - * @start: ? - * @offset: how far we are through the buffer - * @length: how much to read - */ +static void *locks_start(struct seq_file *f, loff_t *pos) +{ + lock_kernel(); + f->private = (void *)1; + return seq_list_start(&file_lock_list, *pos); +} -int get_locks_status(char *buffer, char **start, off_t offset, int length) +static void *locks_next(struct seq_file *f, void *v, loff_t *pos) { - struct list_head *tmp; - char *q = buffer; - off_t pos = 0; - int i = 0; + return seq_list_next(v, &file_lock_list, pos); +} - lock_kernel(); - list_for_each(tmp, &file_lock_list) { - struct list_head *btmp; - struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); - lock_get_status(q, fl, ++i, ""); - move_lock_status(&q, &pos, offset); - - if(pos >= offset+length) - goto done; - - list_for_each(btmp, &fl->fl_block) { - struct file_lock *bfl = list_entry(btmp, - struct file_lock, fl_block); - lock_get_status(q, bfl, i, " ->"); - move_lock_status(&q, &pos, offset); - - if(pos >= offset+length) - goto done; - } - } -done: +static void locks_stop(struct seq_file *f, void *v) +{ unlock_kernel(); - *start = buffer; - if(q-buffer < length) - return (q-buffer); - return length; } +static const struct seq_operations locks_seq_operations = { + .start = locks_start, + .next = locks_next, + .stop = locks_stop, + .show = locks_show, +}; + +static int locks_open(struct inode *inode, struct file *filp) +{ + return seq_open(filp, &locks_seq_operations); +} + +static const struct file_operations proc_locks_operations = { + .open = locks_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init proc_locks_init(void) +{ + proc_create("locks", 0, NULL, &proc_locks_operations); + return 0; +} +module_init(proc_locks_init); +#endif + /** * lock_may_read - checks that the region is free of locks * 
@inode: the inode that is being read @@ -2201,63 +2291,11 @@ int lock_may_write(struct inode *inode, loff_t start, unsigned long len) EXPORT_SYMBOL(lock_may_write); -static inline void __steal_locks(struct file *file, fl_owner_t from) -{ - struct inode *inode = file->f_dentry->d_inode; - struct file_lock *fl = inode->i_flock; - - while (fl) { - if (fl->fl_file == file && fl->fl_owner == from) - fl->fl_owner = current->files; - fl = fl->fl_next; - } -} - -/* When getting ready for executing a binary, we make sure that current - * has a files_struct on its own. Before dropping the old files_struct, - * we take over ownership of all locks for all file descriptors we own. - * Note that we may accidentally steal a lock for a file that a sibling - * has created since the unshare_files() call. - */ -void steal_locks(fl_owner_t from) -{ - struct files_struct *files = current->files; - int i, j; - struct fdtable *fdt; - - if (from == files) - return; - - lock_kernel(); - j = 0; - rcu_read_lock(); - fdt = files_fdtable(files); - for (;;) { - unsigned long set; - i = j * __NFDBITS; - if (i >= fdt->max_fdset || i >= fdt->max_fds) - break; - set = fdt->open_fds->fds_bits[j++]; - while (set) { - if (set & 1) { - struct file *file = fdt->fd[i]; - if (file) - __steal_locks(file, from); - } - i++; - set >>= 1; - } - } - rcu_read_unlock(); - unlock_kernel(); -} -EXPORT_SYMBOL(steal_locks); - static int __init filelock_init(void) { filelock_cache = kmem_cache_create("file_lock_cache", sizeof(struct file_lock), 0, SLAB_PANIC, - init_once, NULL); + init_once); return 0; }
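Taken together, generic_setlease()/vfs_setlease() above back the fcntl(F_SETLEASE) interface, and __break_lease() is what runs when a conflicting open() or truncate() arrives. A minimal userspace sketch of acquiring and dropping a read lease (hypothetical path; the caller must own the file or hold CAP_LEASE, and a read lease requires a descriptor opened read-only):

#define _GNU_SOURCE		/* for F_SETLEASE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/leasetest", O_RDONLY);
	if (fd < 0)
		return 1;

	/* Routed through vfs_setlease(); handled by generic_setlease()
	 * unless the filesystem supplies its own ->setlease() method. */
	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
		perror("F_SETLEASE F_RDLCK");

	/* A conflicting open() by another task would now trigger
	 * __break_lease() and signal us (SIGIO by default). */

	if (fcntl(fd, F_SETLEASE, F_UNLCK) == -1)
		perror("F_SETLEASE F_UNLCK");

	close(fd);
	return 0;
}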