diff --git a/mm/filemap.c b/mm/filemap.c
index 4e182a9..876bc59 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
 /*
  * Remove a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
+ * is safe.  The caller must hold the mapping's tree_lock.
  */
 void __remove_from_page_cache(struct page *page)
 {
@@ -141,9 +141,9 @@ void remove_from_page_cache(struct page *page)
 
        BUG_ON(!PageLocked(page));
 
-       write_lock_irq(&mapping->tree_lock);
+       spin_lock_irq(&mapping->tree_lock);
        __remove_from_page_cache(page);
-       write_unlock_irq(&mapping->tree_lock);
+       spin_unlock_irq(&mapping->tree_lock);
 }
 
 static int sync_page(void *word)
@@ -469,7 +469,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                page->mapping = mapping;
                page->index = offset;
 
-               write_lock_irq(&mapping->tree_lock);
+               spin_lock_irq(&mapping->tree_lock);
                error = radix_tree_insert(&mapping->page_tree, offset, page);
                if (likely(!error)) {
                        mapping->nrpages++;
@@ -480,7 +480,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                        page_cache_release(page);
                }
 
-               write_unlock_irq(&mapping->tree_lock);
+               spin_unlock_irq(&mapping->tree_lock);
                radix_tree_preload_end();
        } else
                mem_cgroup_uncharge_cache_page(page);
@@ -558,14 +558,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
  * The first mb is necessary to safely close the critical section opened by the
- * TestSetPageLocked(), the second mb is necessary to enforce ordering between
- * the clear_bit and the read of the waitqueue (to avoid SMP races with a
- * parallel wait_on_page_locked()).
+ * test_and_set_bit() to lock the page; the second mb is necessary to enforce
+ * ordering between the clear_bit and the read of the waitqueue (to avoid SMP
+ * races with a parallel wait_on_page_locked()).
  */
 void unlock_page(struct page *page)
 {
        smp_mb__before_clear_bit();
-       if (!TestClearPageLocked(page))
+       if (!test_and_clear_bit(PG_locked, &page->flags))
                BUG();
        smp_mb__after_clear_bit(); 
        wake_up_page(page, PG_locked);
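
A minimal sketch of the lost-wakeup race that the second barrier guards against (illustrative fragments only, not part of this diff; the waiter side is modelled loosely on wait_on_page_locked()):

	/* Waiter (hypothetical), trying to sleep until the page is unlocked: */
	prepare_to_wait(page_waitqueue(page), &wait, TASK_UNINTERRUPTIBLE);
	if (test_bit(PG_locked, &page->flags))		/* (1) re-check the flag   */
		schedule();				/* sleep until woken       */

	/* Unlocker, i.e. the tail of unlock_page() above: */
	test_and_clear_bit(PG_locked, &page->flags);	/* (2) release the lock    */
	smp_mb__after_clear_bit();			/* order (2) before (3)    */
	wake_up_page(page, PG_locked);			/* (3) look at waitqueue   */

Without the barrier, (3) could be reordered before (2): the unlocker could see an empty waitqueue and skip the wakeup, while the waiter, still observing PG_locked set at (1), goes to sleep with nobody left to wake it.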
@@ -637,15 +637,35 @@ void __lock_page_nosync(struct page *page)
  * Is there a pagecache struct page at the given (mapping, offset) tuple?
  * If yes, increment its refcount and return it; if no, return NULL.
  */
-struct page * find_get_page(struct address_space *mapping, pgoff_t offset)
+struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
 {
+       void **pagep;
        struct page *page;
 
-       read_lock_irq(&mapping->tree_lock);
-       page = radix_tree_lookup(&mapping->page_tree, offset);
-       if (page)
-               page_cache_get(page);
-       read_unlock_irq(&mapping->tree_lock);
+       rcu_read_lock();
+repeat:
+       page = NULL;
+       pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
+       if (pagep) {
+               page = radix_tree_deref_slot(pagep);
+               if (unlikely(!page || page == RADIX_TREE_RETRY))
+                       goto repeat;
+
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
+
+               /*
+                * Has the page moved?
+                * This is part of the lockless pagecache protocol. See
+                * include/linux/pagemap.h for details.
+                */
+               if (unlikely(page != *pagep)) {
+                       page_cache_release(page);
+                       goto repeat;
+               }
+       }
+       rcu_read_unlock();
+
        return page;
 }
 EXPORT_SYMBOL(find_get_page);
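
The "lockless pagecache protocol" referenced above is documented in include/linux/pagemap.h; distilled, the lookup side boils down to the pattern below (a simplified sketch, not part of this diff; the RADIX_TREE_RETRY handling is omitted):

	rcu_read_lock();
	page = radix_tree_deref_slot(pagep);		/* may race with removal      */
	if (page && page_cache_get_speculative(page)) {	/* try to pin a reference     */
		if (page != *pagep) {			/* slot changed under us?     */
			page_cache_release(page);	/* stale page: drop it ...    */
			page = NULL;			/* ... and retry the lookup   */
		}
	}
	rcu_read_unlock();

The re-check against *pagep is what lets readers skip mapping->tree_lock entirely: if the page was removed or replaced between the dereference and the refcount bump, the reader drops its speculative reference and tries again, leaving tree_lock to writers only.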
@@ -660,32 +680,22 @@ EXPORT_SYMBOL(find_get_page);
  *
  * Returns NULL if the page was not present. find_lock_page() may sleep.
  */
-struct page *find_lock_page(struct address_space *mapping,
-                               pgoff_t offset)
+struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
 {
        struct page *page;
 
 repeat:
-       read_lock_irq(&mapping->tree_lock);
-       page = radix_tree_lookup(&mapping->page_tree, offset);
+       page = find_get_page(mapping, offset);
        if (page) {
-               page_cache_get(page);
-               if (TestSetPageLocked(page)) {
-                       read_unlock_irq(&mapping->tree_lock);
-                       __lock_page(page);
-
-                       /* Has the page been truncated while we slept? */
-                       if (unlikely(page->mapping != mapping)) {
-                               unlock_page(page);
-                               page_cache_release(page);
-                               goto repeat;
-                       }
-                       VM_BUG_ON(page->index != offset);
-                       goto out;
+               lock_page(page);
+               /* Has the page been truncated? */
+               if (unlikely(page->mapping != mapping)) {
+                       unlock_page(page);
+                       page_cache_release(page);
+                       goto repeat;
                }
+               VM_BUG_ON(page->index != offset);
        }
-       read_unlock_irq(&mapping->tree_lock);
-out:
        return page;
 }
 EXPORT_SYMBOL(find_lock_page);
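
A hedged usage sketch (hypothetical caller, not part of this diff): thanks to the truncation re-check above, a non-NULL return means the page is locked and still attached to the mapping it was looked up in.

	struct page *page = find_lock_page(mapping, index);
	if (page) {
		/* page is locked and page->mapping == mapping at this point */
		/* ... inspect or modify the page ... */
		unlock_page(page);
		page_cache_release(page);	/* drop the reference taken by the lookup */
	}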
@@ -751,13 +761,39 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 {
        unsigned int i;
        unsigned int ret;
+       unsigned int nr_found;
+
+       rcu_read_lock();
+restart:
+       nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
+                               (void ***)pages, start, nr_pages);
+       ret = 0;
+       for (i = 0; i < nr_found; i++) {
+               struct page *page;
+repeat:
+               page = radix_tree_deref_slot((void **)pages[i]);
+               if (unlikely(!page))
+                       continue;
+               /*
+                * This can only trigger if nr_found == 1, making livelock
+                * a non-issue.
+                */
+               if (unlikely(page == RADIX_TREE_RETRY))
+                       goto restart;
+
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
+
+               /* Has the page moved? */
+               if (unlikely(page != *((void **)pages[i]))) {
+                       page_cache_release(page);
+                       goto repeat;
+               }
 
-       read_lock_irq(&mapping->tree_lock);
-       ret = radix_tree_gang_lookup(&mapping->page_tree,
-                               (void **)pages, start, nr_pages);
-       for (i = 0; i < ret; i++)
-               page_cache_get(pages[i]);
-       read_unlock_irq(&mapping->tree_lock);
+               pages[ret] = page;
+               ret++;
+       }
+       rcu_read_unlock();
        return ret;
 }
 
@@ -778,19 +814,44 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 {
        unsigned int i;
        unsigned int ret;
+       unsigned int nr_found;
+
+       rcu_read_lock();
+restart:
+       nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
+                               (void ***)pages, index, nr_pages);
+       ret = 0;
+       for (i = 0; i < nr_found; i++) {
+               struct page *page;
+repeat:
+               page = radix_tree_deref_slot((void **)pages[i]);
+               if (unlikely(!page))
+                       continue;
+               /*
+                * This can only trigger if nr_found == 1, making livelock
+                * a non-issue.
+                */
+               if (unlikely(page == RADIX_TREE_RETRY))
+                       goto restart;
 
-       read_lock_irq(&mapping->tree_lock);
-       ret = radix_tree_gang_lookup(&mapping->page_tree,
-                               (void **)pages, index, nr_pages);
-       for (i = 0; i < ret; i++) {
-               if (pages[i]->mapping == NULL || pages[i]->index != index)
+               if (page->mapping == NULL || page->index != index)
                        break;
 
-               page_cache_get(pages[i]);
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
+
+               /* Has the page moved? */
+               if (unlikely(page != *((void **)pages[i]))) {
+                       page_cache_release(page);
+                       goto repeat;
+               }
+
+               pages[ret] = page;
+               ret++;
                index++;
        }
-       read_unlock_irq(&mapping->tree_lock);
-       return i;
+       rcu_read_unlock();
+       return ret;
 }
 EXPORT_SYMBOL(find_get_pages_contig);
 
@@ -810,15 +871,43 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 {
        unsigned int i;
        unsigned int ret;
+       unsigned int nr_found;
+
+       rcu_read_lock();
+restart:
+       nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree,
+                               (void ***)pages, *index, nr_pages, tag);
+       ret = 0;
+       for (i = 0; i < nr_found; i++) {
+               struct page *page;
+repeat:
+               page = radix_tree_deref_slot((void **)pages[i]);
+               if (unlikely(!page))
+                       continue;
+               /*
+                * This can only trigger if nr_found == 1, making livelock
+                * a non-issue.
+                */
+               if (unlikely(page == RADIX_TREE_RETRY))
+                       goto restart;
+
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
+
+               /* Has the page moved? */
+               if (unlikely(page != *((void **)pages[i]))) {
+                       page_cache_release(page);
+                       goto repeat;
+               }
+
+               pages[ret] = page;
+               ret++;
+       }
+       rcu_read_unlock();
 
-       read_lock_irq(&mapping->tree_lock);
-       ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
-                               (void **)pages, *index, nr_pages, tag);
-       for (i = 0; i < ret; i++)
-               page_cache_get(pages[i]);
        if (ret)
                *index = pages[ret - 1]->index + 1;
-       read_unlock_irq(&mapping->tree_lock);
+
        return ret;
 }
 EXPORT_SYMBOL(find_get_pages_tag);
@@ -842,7 +931,7 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
        struct page *page = find_get_page(mapping, index);
 
        if (page) {
-               if (!TestSetPageLocked(page))
+               if (trylock_page(page))
                        return page;
                page_cache_release(page);
                return NULL;
@@ -934,8 +1023,17 @@ find_page:
                                        ra, filp, page,
                                        index, last_index - index);
                }
-               if (!PageUptodate(page))
-                       goto page_not_up_to_date;
+               if (!PageUptodate(page)) {
+                       if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
+                                       !mapping->a_ops->is_partially_uptodate)
+                               goto page_not_up_to_date;
+                       if (!trylock_page(page))
+                               goto page_not_up_to_date;
+                       if (!mapping->a_ops->is_partially_uptodate(page,
+                                                               desc, offset))
+                               goto page_not_up_to_date_locked;
+                       unlock_page(page);
+               }
 page_ok:
                /*
                 * i_size must be checked after we know the page is Uptodate.
@@ -1005,6 +1103,7 @@ page_not_up_to_date:
                if (lock_page_killable(page))
                        goto readpage_eio;
 
+page_not_up_to_date_locked:
                /* Did it get truncated before we got the lock? */
                if (!page->mapping) {
                        unlock_page(page);
@@ -1669,8 +1768,9 @@ static int __remove_suid(struct dentry *dentry, int kill)
        return notify_change(dentry, &newattrs);
 }
 
-int remove_suid(struct dentry *dentry)
+int file_remove_suid(struct file *file)
 {
+       struct dentry *dentry = file->f_path.dentry;
        int killsuid = should_remove_suid(dentry);
        int killpriv = security_inode_need_killpriv(dentry);
        int error = 0;
@@ -1684,7 +1784,7 @@ int remove_suid(struct dentry *dentry)
 
        return error;
 }
-EXPORT_SYMBOL(remove_suid);
+EXPORT_SYMBOL(file_remove_suid);
 
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
@@ -1779,7 +1879,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
                 * The !iov->iov_len check ensures we skip over unlikely
                 * zero-length segments (without overrunning the iovec).
                 */
-               while (bytes || unlikely(!iov->iov_len && i->count)) {
+               while (bytes || unlikely(i->count && !iov->iov_len)) {
                        int copy;
 
                        copy = min(bytes, iov->iov_len - base);
@@ -2029,13 +2129,20 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
         * After a write we want buffered reads to be sure to go to disk to get
         * the new data.  We invalidate clean cached page from the region we're
         * about to write.  We do this *before* the write so that we can return
-        * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
+        * without clobbering -EIOCBQUEUED from ->direct_IO().
         */
        if (mapping->nrpages) {
                written = invalidate_inode_pages2_range(mapping,
                                        pos >> PAGE_CACHE_SHIFT, end);
-               if (written)
+               /*
+                * If a page cannot be invalidated, return 0 to fall back
+                * to a buffered write.
+                */
+               if (written) {
+                       if (written == -EBUSY)
+                               return 0;
                        goto out;
+               }
        }
 
        written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
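
A hedged sketch of how a caller is expected to handle the new 0 return (not part of this diff; it assumes the era's generic_file_direct_write() prototype and mirrors the buffered fallback in __generic_file_aio_write_nolock()):

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
						ppos, count, ocount);
	if (written < 0 || written == count)
		return written;		/* hard error, or everything went direct */
	/*
	 * A short (possibly zero) direct write means some pages could not be
	 * invalidated; write the remainder through the page cache instead.
	 */
	pos += written;
	count -= written;
	/* ... continue with generic_file_buffered_write() for the rest ... */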
@@ -2440,7 +2547,7 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
        if (count == 0)
                goto out;
 
-       err = remove_suid(file->f_path.dentry);
+       err = file_remove_suid(file);
        if (err)
                goto out;