X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Flocks.c;h=a8794f233bc9c38d45c47f69d03652f7f6f8d533;hb=1f11abc966b82b9fd0c834707486ef301b2f398d;hp=c3eecb895acf1346944d2dd3af217cf11c9c0f5a;hpb=b533184fc353d4a2d07929b4ac424a6f1bf5a3b9;p=safe%2Fjmp%2Flinux-2.6 diff --git a/fs/locks.c b/fs/locks.c index c3eecb8..a8794f2 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -116,6 +116,7 @@ #include #include +#include #include #include #include @@ -125,8 +126,8 @@ #include #include #include +#include -#include #include #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX) @@ -150,7 +151,7 @@ static struct file_lock *locks_alloc_lock(void) return kmem_cache_alloc(filelock_cache, GFP_KERNEL); } -static void locks_release_private(struct file_lock *fl) +void locks_release_private(struct file_lock *fl) { if (fl->fl_ops) { if (fl->fl_ops->fl_release_private) @@ -164,6 +165,7 @@ static void locks_release_private(struct file_lock *fl) } } +EXPORT_SYMBOL_GPL(locks_release_private); /* Free a lock which is not in use. */ static void locks_free_lock(struct file_lock *fl) @@ -185,6 +187,7 @@ void locks_init_lock(struct file_lock *fl) fl->fl_fasync = NULL; fl->fl_owner = NULL; fl->fl_pid = 0; + fl->fl_nspid = NULL; fl->fl_file = NULL; fl->fl_flags = 0; fl->fl_type = 0; @@ -199,7 +202,7 @@ EXPORT_SYMBOL(locks_init_lock); * Initialises the fields of the file lock which are invariant for * free file_locks. */ -static void init_once(struct kmem_cache *cache, void *foo) +static void init_once(void *foo) { struct file_lock *lock = (struct file_lock *) foo; @@ -223,7 +226,7 @@ static void locks_copy_private(struct file_lock *new, struct file_lock *fl) /* * Initialize a new lock from an existing file_lock structure. */ -static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl) +void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl) { new->fl_owner = fl->fl_owner; new->fl_pid = fl->fl_pid; @@ -235,6 +238,7 @@ static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl) new->fl_ops = NULL; new->fl_lmops = NULL; } +EXPORT_SYMBOL(__locks_copy_lock); void locks_copy_lock(struct file_lock *new, struct file_lock *fl) { @@ -430,7 +434,7 @@ static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try) return fl->fl_file == try->fl_file; } -static struct lock_manager_operations lease_manager_ops = { +static const struct lock_manager_operations lease_manager_ops = { .fl_break = lease_break_callback, .fl_release_private = lease_release_private_callback, .fl_mylease = lease_mylease_callback, @@ -553,12 +557,11 @@ static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl) { list_add(&fl->fl_link, &file_lock_list); + fl->fl_nspid = get_pid(task_tgid(current)); + /* insert into file's list */ fl->fl_next = *pos; *pos = fl; - - if (fl->fl_ops && fl->fl_ops->fl_insert) - fl->fl_ops->fl_insert(fl); } /* @@ -581,8 +584,10 @@ static void locks_delete_lock(struct file_lock **thisfl_p) fl->fl_fasync = NULL; } - if (fl->fl_ops && fl->fl_ops->fl_remove) - fl->fl_ops->fl_remove(fl); + if (fl->fl_nspid) { + put_pid(fl->fl_nspid); + fl->fl_nspid = NULL; + } locks_wake_up_blocks(fl); locks_free_lock(fl); @@ -634,33 +639,6 @@ static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *s return (locks_conflict(caller_fl, sys_fl)); } -static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout) -{ - int result = 0; - DECLARE_WAITQUEUE(wait, current); - - __set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(fl_wait, 
&wait); - if (timeout == 0) - schedule(); - else - result = schedule_timeout(timeout); - if (signal_pending(current)) - result = -ERESTARTSYS; - remove_wait_queue(fl_wait, &wait); - __set_current_state(TASK_RUNNING); - return result; -} - -static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time) -{ - int result; - locks_insert_block(blocker, waiter); - result = interruptible_sleep_on_locked(&waiter->fl_wait, time); - __locks_delete_block(waiter); - return result; -} - void posix_test_lock(struct file *filp, struct file_lock *fl) { @@ -673,14 +651,15 @@ posix_test_lock(struct file *filp, struct file_lock *fl) if (posix_locks_conflict(fl, cfl)) break; } - if (cfl) + if (cfl) { __locks_copy_lock(fl, cfl); - else + if (cfl->fl_nspid) + fl->fl_pid = pid_vnr(cfl->fl_nspid); + } else fl->fl_type = F_UNLCK; unlock_kernel(); return; } - EXPORT_SYMBOL(posix_test_lock); /* @@ -801,8 +780,10 @@ find_conflict: if (!flock_locks_conflict(request, fl)) continue; error = -EAGAIN; - if (request->fl_flags & FL_SLEEP) - locks_insert_block(fl, request); + if (!(request->fl_flags & FL_SLEEP)) + goto out; + error = FILE_LOCK_DEFERRED; + locks_insert_block(fl, request); goto out; } if (request->fl_flags & FL_ACCESS) @@ -851,14 +832,14 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str if (!posix_locks_conflict(request, fl)) continue; if (conflock) - locks_copy_lock(conflock, fl); + __locks_copy_lock(conflock, fl); error = -EAGAIN; if (!(request->fl_flags & FL_SLEEP)) goto out; error = -EDEADLK; if (posix_locks_deadlock(request, fl)) goto out; - error = -EAGAIN; + error = FILE_LOCK_DEFERRED; locks_insert_block(fl, request); goto out; } @@ -1057,7 +1038,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl) might_sleep (); for (;;) { error = posix_lock_file(filp, fl, NULL); - if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) + if (error != FILE_LOCK_DEFERRED) break; error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); if (!error) @@ -1129,9 +1110,7 @@ int locks_mandatory_area(int read_write, struct inode *inode, for (;;) { error = __posix_lock_file(inode, &fl, NULL); - if (error != -EAGAIN) - break; - if (!(fl.fl_flags & FL_SLEEP)) + if (error != FILE_LOCK_DEFERRED) break; error = wait_event_interruptible(fl.fl_wait, !fl.fl_next); if (!error) { @@ -1266,7 +1245,10 @@ restart: if (break_time == 0) break_time++; } - error = locks_block_on_timeout(flock, new_fl, break_time); + locks_insert_block(flock, new_fl); + error = wait_event_interruptible_timeout(new_fl->fl_wait, + !new_fl->fl_next, break_time); + __locks_delete_block(new_fl); if (error >= 0) { if (error == 0) time_out_leases(inode); @@ -1289,13 +1271,13 @@ out: EXPORT_SYMBOL(__break_lease); /** - * lease_get_mtime + * lease_get_mtime - get the last modified time of an inode * @inode: the inode * @time: pointer to a timespec which will contain the last modified time * * This is to force NFS clients to flush their caches for files with * exclusive leases. The justification is that if someone has an - * exclusive lease, then they could be modifiying it. + * exclusive lease, then they could be modifying it. 
*/ void lease_get_mtime(struct inode *inode, struct timespec *time) { @@ -1368,7 +1350,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp) struct inode *inode = dentry->d_inode; int error, rdlease_count = 0, wrlease_count = 0; - if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE)) + if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE)) return -EACCES; if (!S_ISREG(inode->i_mode)) return -EINVAL; @@ -1382,18 +1364,20 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp) lease = *flp; - error = -EAGAIN; - if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0)) - goto out; - if ((arg == F_WRLCK) - && ((atomic_read(&dentry->d_count) > 1) - || (atomic_read(&inode->i_count) > 1))) - goto out; + if (arg != F_UNLCK) { + error = -ENOMEM; + new_fl = locks_alloc_lock(); + if (new_fl == NULL) + goto out; - error = -ENOMEM; - new_fl = locks_alloc_lock(); - if (new_fl == NULL) - goto out; + error = -EAGAIN; + if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0)) + goto out; + if ((arg == F_WRLCK) + && ((atomic_read(&dentry->d_count) > 1) + || (atomic_read(&inode->i_count) > 1))) + goto out; + } /* * At this point, we know that if there is an exclusive @@ -1419,6 +1403,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp) rdlease_count++; } + error = -EAGAIN; if ((arg == F_RDLCK && (wrlease_count > 0)) || (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0))) goto out; @@ -1505,8 +1490,7 @@ EXPORT_SYMBOL_GPL(vfs_setlease); int fcntl_setlease(unsigned int fd, struct file *filp, long arg) { struct file_lock fl, *flp = &fl; - struct dentry *dentry = filp->f_path.dentry; - struct inode *inode = dentry->d_inode; + struct inode *inode = filp->f_path.dentry->d_inode; int error; locks_init_lock(&fl); @@ -1548,7 +1532,7 @@ int flock_lock_file_wait(struct file *filp, struct file_lock *fl) might_sleep(); for (;;) { error = flock_lock_file(filp, fl); - if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) + if (error != FILE_LOCK_DEFERRED) break; error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); if (!error) @@ -1581,7 +1565,7 @@ EXPORT_SYMBOL(flock_lock_file_wait); * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other * processes read and write access respectively. */ -asmlinkage long sys_flock(unsigned int fd, unsigned int cmd) +SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd) { struct file *filp; struct file_lock *lock; @@ -1597,7 +1581,8 @@ asmlinkage long sys_flock(unsigned int fd, unsigned int cmd) cmd &= ~LOCK_NB; unlock = (cmd == LOCK_UN); - if (!unlock && !(cmd & LOCK_MAND) && !(filp->f_mode & 3)) + if (!unlock && !(cmd & LOCK_MAND) && + !(filp->f_mode & (FMODE_READ|FMODE_WRITE))) goto out_putf; error = flock_make_lock(filp, &lock, cmd); @@ -1606,7 +1591,7 @@ asmlinkage long sys_flock(unsigned int fd, unsigned int cmd) if (can_sleep) lock->fl_flags |= FL_SLEEP; - error = security_file_lock(filp, cmd); + error = security_file_lock(filp, lock->fl_type); if (error) goto out_free; @@ -1733,17 +1718,17 @@ out: * fl_grant is set. Callers expecting ->lock() to return asynchronously * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if) * the request is for a blocking lock. When ->lock() does return asynchronously, - * it must return -EINPROGRESS, and call ->fl_grant() when the lock + * it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock * request completes. 
* If the request is for non-blocking lock the file system should return - * -EINPROGRESS then try to get the lock and call the callback routine with - * the result. If the request timed out the callback routine will return a + * FILE_LOCK_DEFERRED then try to get the lock and call the callback routine + * with the result. If the request timed out the callback routine will return a * nonzero return code and the file system should release the lock. The file * system is also responsible to keep a corresponding posix lock when it * grants a lock so the VFS can find out which locks are locally held and do * the correct lock cleanup when required. * The underlying filesystem must not drop the kernel lock or call - * ->fl_grant() before returning to the caller with a -EINPROGRESS + * ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED * return code. */ int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf) @@ -1755,6 +1740,30 @@ int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, str } EXPORT_SYMBOL_GPL(vfs_lock_file); +static int do_lock_file_wait(struct file *filp, unsigned int cmd, + struct file_lock *fl) +{ + int error; + + error = security_file_lock(filp, fl->fl_type); + if (error) + return error; + + for (;;) { + error = vfs_lock_file(filp, cmd, fl, NULL); + if (error != FILE_LOCK_DEFERRED) + break; + error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); + if (!error) + continue; + + locks_delete_block(fl); + break; + } + + return error; +} + /* Apply the lock described by l to an open file descriptor. * This implements both the F_SETLK and F_SETLKW commands of fcntl(). */ @@ -1764,6 +1773,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, struct file_lock *file_lock = locks_alloc_lock(); struct flock flock; struct inode *inode; + struct file *f; int error; if (file_lock == NULL) @@ -1811,28 +1821,21 @@ again: goto out; } - error = security_file_lock(filp, file_lock->fl_type); - if (error) - goto out; - - for (;;) { - error = vfs_lock_file(filp, cmd, file_lock, NULL); - if (error != -EAGAIN || cmd == F_SETLK) - break; - error = wait_event_interruptible(file_lock->fl_wait, - !file_lock->fl_next); - if (!error) - continue; - - locks_delete_block(file_lock); - break; - } + error = do_lock_file_wait(filp, cmd, file_lock); /* * Attempt to detect a close/fcntl race and recover by * releasing the lock that was just acquired. */ - if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { + /* + * we need that spin_lock here - it prevents reordering between + * update of inode->i_flock and check for it done in close(). + * rcu_read_lock() wouldn't do. 
+ */ + spin_lock(¤t->files->file_lock); + f = fcheck(fd); + spin_unlock(¤t->files->file_lock); + if (!error && f != filp && flock.l_type != F_UNLCK) { flock.l_type = F_UNLCK; goto again; } @@ -1888,6 +1891,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, struct file_lock *file_lock = locks_alloc_lock(); struct flock64 flock; struct inode *inode; + struct file *f; int error; if (file_lock == NULL) @@ -1935,28 +1939,16 @@ again: goto out; } - error = security_file_lock(filp, file_lock->fl_type); - if (error) - goto out; - - for (;;) { - error = vfs_lock_file(filp, cmd, file_lock, NULL); - if (error != -EAGAIN || cmd == F_SETLK64) - break; - error = wait_event_interruptible(file_lock->fl_wait, - !file_lock->fl_next); - if (!error) - continue; - - locks_delete_block(file_lock); - break; - } + error = do_lock_file_wait(filp, cmd, file_lock); /* * Attempt to detect a close/fcntl race and recover by * releasing the lock that was just acquired. */ - if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { + spin_lock(¤t->files->file_lock); + f = fcheck(fd); + spin_unlock(¤t->files->file_lock); + if (!error && f != filp && flock.l_type != F_UNLCK) { flock.l_type = F_UNLCK; goto again; } @@ -2088,12 +2080,19 @@ int vfs_cancel_lock(struct file *filp, struct file_lock *fl) EXPORT_SYMBOL_GPL(vfs_cancel_lock); #ifdef CONFIG_PROC_FS +#include #include static void lock_get_status(struct seq_file *f, struct file_lock *fl, int id, char *pfx) { struct inode *inode = NULL; + unsigned int fl_pid; + + if (fl->fl_nspid) + fl_pid = pid_vnr(fl->fl_nspid); + else + fl_pid = fl->fl_pid; if (fl->fl_file != NULL) inode = fl->fl_file->f_path.dentry->d_inode; @@ -2134,16 +2133,16 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, } if (inode) { #ifdef WE_CAN_BREAK_LSLK_NOW - seq_printf(f, "%d %s:%ld ", fl->fl_pid, + seq_printf(f, "%d %s:%ld ", fl_pid, inode->i_sb->s_id, inode->i_ino); #else /* userspace relies on this representation of dev_t ;-( */ - seq_printf(f, "%d %02x:%02x:%ld ", fl->fl_pid, + seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), inode->i_ino); #endif } else { - seq_printf(f, "%d :0 ", fl->fl_pid); + seq_printf(f, "%d :0 ", fl_pid); } if (IS_POSIX(fl)) { if (fl->fl_end == OFFSET_MAX) @@ -2187,12 +2186,31 @@ static void locks_stop(struct seq_file *f, void *v) unlock_kernel(); } -struct seq_operations locks_seq_operations = { +static const struct seq_operations locks_seq_operations = { .start = locks_start, .next = locks_next, .stop = locks_stop, .show = locks_show, }; + +static int locks_open(struct inode *inode, struct file *filp) +{ + return seq_open(filp, &locks_seq_operations); +} + +static const struct file_operations proc_locks_operations = { + .open = locks_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init proc_locks_init(void) +{ + proc_create("locks", 0, NULL, &proc_locks_operations); + return 0; +} +module_init(proc_locks_init); #endif /**