shmem_getpage return page locked
author	Hugh Dickins <hugh@veritas.com>
Tue, 5 Feb 2008 06:28:44 +0000 (22:28 -0800)
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>
Tue, 5 Feb 2008 17:44:15 +0000 (09:44 -0800)
In the new aops, write_begin is supposed to return the page locked: though
I've seen no ill effects, that's been overlooked in the case of
shmem_write_begin, and should be fixed.  Then shmem_write_end must unlock the
page: do so _after_ updating i_size, as we found to be important in other
filesystems (though since shmem pages don't go the usual writeback route, they
never suffered from that corruption).
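
For context, the caller side of this contract looks roughly like the sketch
below: a simplified, hypothetical rendering of the generic buffered-write
loop rather than the exact mm/filemap.c code, with names like "bytes",
"offset", "i" and "fsdata" only illustrative.  ->write_begin hands the page
back locked, the caller copies the user data into it, and ->write_end is then
expected to publish the new i_size while the page is still locked, unlocking
it before the reference is dropped.

	/*
	 * Minimal sketch of driving ->write_begin/->write_end
	 * (error handling and iov bookkeeping omitted).
	 */
	status = a_ops->write_begin(file, mapping, pos, bytes, 0,
					&page, &fsdata);
	if (status)
		return status;
	/* page comes back locked: copy the user data into it */
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	flush_dcache_page(page);
	/*
	 * ->write_end updates i_size under the page lock, then unlocks
	 * and releases the page.
	 */
	status = a_ops->write_end(file, mapping, pos, bytes, copied,
					page, fsdata);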

For shmem_write_begin to return the page locked, we need shmem_getpage to
return the page locked in the SGP_WRITE case as well as the SGP_CACHE case:
let's simplify the interface and return it locked even for SGP_READ.
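
With that change the calling convention is uniform: whatever the sgp_type, a
successful shmem_getpage hands back a locked page (or NULL for a hole in the
SGP_READ case), and the caller drops the lock itself.  A minimal, hypothetical
caller sketch, matching the unlock_page() additions in the diff below ("idx"
is just an illustrative index):

	struct page *page = NULL;
	int error;

	error = shmem_getpage(inode, idx, &page, SGP_READ, NULL);
	if (error)
		return error;
	if (page) {
		/* now returned locked for every sgp_type */
		unlock_page(page);
		/* ... use the page, then page_cache_release(page) ... */
	}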

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/shmem.c b/mm/shmem.c
index 20cefe1..43d0719 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -729,6 +729,8 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
                                (void) shmem_getpage(inode,
                                        attr->ia_size>>PAGE_CACHE_SHIFT,
                                                &page, SGP_READ, NULL);
+                               if (page)
+                                       unlock_page(page);
                        }
                        /*
                         * Reset SHMEM_PAGEIN flag so that shmem_truncate can
@@ -1286,12 +1288,7 @@ repeat:
                SetPageUptodate(filepage);
        }
 done:
-       if (*pagep != filepage) {
-               *pagep = filepage;
-               if (sgp != SGP_CACHE)
-                       unlock_page(filepage);
-
-       }
+       *pagep = filepage;
        return 0;
 
 failed:
@@ -1469,12 +1466,13 @@ shmem_write_end(struct file *file, struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
 
+       if (pos + copied > inode->i_size)
+               i_size_write(inode, pos + copied);
+
+       unlock_page(page);
        set_page_dirty(page);
        page_cache_release(page);
 
-       if (pos+copied > inode->i_size)
-               i_size_write(inode, pos+copied);
-
        return copied;
 }
 
@@ -1529,6 +1527,7 @@ shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t
                if (err)
                        break;
 
+               unlock_page(page);
                left = bytes;
                if (PageHighMem(page)) {
                        volatile unsigned char dummy;
@@ -1610,6 +1609,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                                desc->error = 0;
                        break;
                }
+               if (page)
+                       unlock_page(page);
 
                /*
                 * We must evaluate after, since reads (unlike writes)
@@ -1899,6 +1900,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                        iput(inode);
                        return error;
                }
+               unlock_page(page);
                inode->i_op = &shmem_symlink_inode_operations;
                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(kaddr, symname, len);
@@ -1926,6 +1928,8 @@ static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
        struct page *page = NULL;
        int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
        nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
+       if (page)
+               unlock_page(page);
        return page;
 }