diff --git a/mm/truncate.c b/mm/truncate.c
index 9173ab5..6650c1d 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -8,29 +8,88 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/backing-dev.h>
 #include <linux/mm.h>
+#include <linux/swap.h>
 #include <linux/module.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <linux/pagevec.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/buffer_head.h> /* grr. try_to_release_page,
                                   do_invalidatepage */
 
 
+/**
+ * do_invalidatepage - invalidate part or all of a page
+ * @page: the page which is affected
+ * @offset: the byte offset of the truncation point within the page
+ *
+ * do_invalidatepage() is called when all or part of the page has become
+ * invalidated by a truncate operation.
+ *
+ * do_invalidatepage() does not have to release all buffers, but it must
+ * ensure that no dirty buffer is left outside @offset and that no I/O
+ * is underway against any of the blocks which are outside the truncation
+ * point, because the caller is about to free (and possibly reuse) those
+ * blocks on-disk.
+ */
+void do_invalidatepage(struct page *page, unsigned long offset)
+{
+       void (*invalidatepage)(struct page *, unsigned long);
+       invalidatepage = page->mapping->a_ops->invalidatepage;
+#ifdef CONFIG_BLOCK
+       if (!invalidatepage)
+               invalidatepage = block_invalidatepage;
+#endif
+       if (invalidatepage)
+               (*invalidatepage)(page, offset);
+}
+
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
-       memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
+       zero_user_segment(page, partial, PAGE_CACHE_SIZE);
        if (PagePrivate(page))
                do_invalidatepage(page, partial);
 }
 
 /*
+ * This cancels just the dirty bit on the kernel page itself; it
+ * does NOT actually remove dirty bits on any mmaps that may be
+ * around. It also leaves the page tagged dirty, so any sync
+ * activity will still find it on the dirty lists, and in particular,
+ * clear_page_dirty_for_io() will still look at the dirty bits in
+ * the VM.
+ *
+ * Doing this should *normally* only ever be done when a page
+ * is truncated, and is not actually mapped anywhere at all. However,
+ * fs/buffer.c does this when it notices that somebody has cleaned
+ * out all the buffers on a page without actually doing it through
+ * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
+ */
+void cancel_dirty_page(struct page *page, unsigned int account_size)
+{
+       if (TestClearPageDirty(page)) {
+               struct address_space *mapping = page->mapping;
+               if (mapping && mapping_cap_account_dirty(mapping)) {
+                       dec_zone_page_state(page, NR_FILE_DIRTY);
+                       dec_bdi_stat(mapping->backing_dev_info,
+                                       BDI_RECLAIMABLE);
+                       if (account_size)
+                               task_io_account_cancelled_write(account_size);
+               }
+       }
+}
+EXPORT_SYMBOL(cancel_dirty_page);
+
+/*
  * If truncate cannot remove the fs-private metadata from the page, the page
- * becomes anonymous.  It will be left on the LRU and may even be mapped into
- * user pagetables if we're racing with filemap_nopage().
+ * becomes orphaned.  It will be left on the LRU and may even be mapped into
+ * user pagetables if we're racing with filemap_fault().
  *
  * We need to bale out if page->mapping is no longer equal to the original
  * mapping.  This happens a) when the VM reclaimed the page while we waited on
- * its lock, b) when a concurrent invalidate_inode_pages got there first and
+ * its lock, b) when a concurrent invalidate_mapping_pages got there first and
  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
  */
 static void
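
The dispatch in do_invalidatepage() above means a filesystem opts in simply by setting the hook in its address_space_operations; under CONFIG_BLOCK, a mapping that leaves it NULL falls back to block_invalidatepage(). A minimal sketch of the wiring (the myfs_* names are illustrative, not part of this patch):

    /* Illustrative only: the myfs_* names are made up. */
    static void myfs_invalidatepage(struct page *page, unsigned long offset)
    {
            /* No dirty buffer may survive beyond @offset: the blocks
             * behind it are about to be freed on disk. */
            block_invalidatepage(page, offset);
    }

    static const struct address_space_operations myfs_aops = {
            .invalidatepage = myfs_invalidatepage,
    };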
@@ -42,52 +101,46 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
        if (PagePrivate(page))
                do_invalidatepage(page, 0);
 
-       clear_page_dirty(page);
-       ClearPageUptodate(page);
-       ClearPageMappedToDisk(page);
+       cancel_dirty_page(page, PAGE_CACHE_SIZE);
+
        remove_from_page_cache(page);
+       ClearPageMappedToDisk(page);
        page_cache_release(page);       /* pagecache ref */
 }
 
 /*
- * This is for invalidate_inode_pages().  That function can be called at
+ * This is for invalidate_mapping_pages().  That function can be called at
  * any time, and is not supposed to throw away dirty pages.  But pages can
- * be marked dirty at any time too.  So we re-check the dirtiness inside
- * ->tree_lock.  That provides exclusion against the __set_page_dirty
- * functions.
+ * be marked dirty at any time too, so use remove_mapping which safely
+ * discards clean, unused pages.
  *
  * Returns non-zero if the page was successfully invalidated.
  */
 static int
 invalidate_complete_page(struct address_space *mapping, struct page *page)
 {
+       int ret;
+
        if (page->mapping != mapping)
                return 0;
 
        if (PagePrivate(page) && !try_to_release_page(page, 0))
                return 0;
 
-       write_lock_irq(&mapping->tree_lock);
-       if (PageDirty(page)) {
-               write_unlock_irq(&mapping->tree_lock);
-               return 0;
-       }
+       ret = remove_mapping(mapping, page);
 
-       BUG_ON(PagePrivate(page));
-       __remove_from_page_cache(page);
-       write_unlock_irq(&mapping->tree_lock);
-       ClearPageUptodate(page);
-       page_cache_release(page);       /* pagecache ref */
-       return 1;
+       return ret;
 }
 
 /**
- * truncate_inode_pages - truncate *all* the pages from an offset
+ * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
  * @lstart: offset from which to truncate
+ * @lend: offset to which to truncate
  *
- * Truncate the page cache at a set offset, removing the pages that are beyond
- * that offset (and zeroing out partial pages).
+ * Truncate the page cache, removing the pages that are between the
+ * specified offsets (and zeroing out the partial page if lstart is
+ * not page aligned).
  *
  * Truncate takes two passes - the first pass is nonblocking.  It will not
  * block on page locks and it will not block on writeback.  The second pass
@@ -101,12 +154,12 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
  * We pass down the cache-hot hint to the page freeing code.  Even if the
  * mapping is large, it is probably the case that the final pages are the most
  * recently touched, and freeing happens in ascending file offset order.
- *
- * Called under (and serialised by) inode->i_sem.
  */
-void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
+void truncate_inode_pages_range(struct address_space *mapping,
+                               loff_t lstart, loff_t lend)
 {
        const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
+       pgoff_t end;
        const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t next;
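
A worked example of the index arithmetic above, assuming 4096-byte pages (the numbers are illustrative):

    /* Truncate to lstart = 5000 with PAGE_CACHE_SIZE = 4096:
     *   start   = (5000 + 4095) >> 12 = 2     first page removed outright
     *   partial = 5000 & 4095         = 904   page 1 keeps bytes 0..903;
     *                                         the rest is zeroed
     */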
@@ -115,22 +168,36 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
        if (mapping->nrpages == 0)
                return;
 
+       BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
+       end = (lend >> PAGE_CACHE_SHIFT);
+
        pagevec_init(&pvec, 0);
        next = start;
-       while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+       while (next <= end &&
+              pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index = page->index;
 
+                       if (page_index > end) {
+                               next = page_index;
+                               break;
+                       }
+
                        if (page_index > next)
                                next = page_index;
                        next++;
-                       if (TestSetPageLocked(page))
+                       if (!trylock_page(page))
                                continue;
                        if (PageWriteback(page)) {
                                unlock_page(page);
                                continue;
                        }
+                       if (page_mapped(page)) {
+                               unmap_mapping_range(mapping,
+                                 (loff_t)page_index<<PAGE_CACHE_SHIFT,
+                                 PAGE_CACHE_SIZE, 0);
+                       }
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
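
The BUG_ON in the hunk above encodes the calling convention of the new range interface: lend must name the last byte of the range, one short of a page boundary. For instance, again with 4096-byte pages (illustrative numbers):

    /* Removing page indices 2..5 inclusive:
     *   lstart = 2 * 4096     = 8192
     *   lend   = 6 * 4096 - 1 = 24575    (lend & 4095) == 4095, so OK
     *   end    = 24575 >> 12  = 5        last page index to drop
     */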
@@ -157,11 +224,22 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
                        next = start;
                        continue;
                }
+               if (pvec.pages[0]->index > end) {
+                       pagevec_release(&pvec);
+                       break;
+               }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
 
+                       if (page->index > end)
+                               break;
                        lock_page(page);
                        wait_on_page_writeback(page);
+                       if (page_mapped(page)) {
+                               unmap_mapping_range(mapping,
+                                 (loff_t)page->index<<PAGE_CACHE_SHIFT,
+                                 PAGE_CACHE_SIZE, 0);
+                       }
                        if (page->index > next)
                                next = page->index;
                        next++;
@@ -171,24 +249,23 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
                pagevec_release(&pvec);
        }
 }
-
-EXPORT_SYMBOL(truncate_inode_pages);
+EXPORT_SYMBOL(truncate_inode_pages_range);
 
 /**
- * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
- * @mapping: the address_space which holds the pages to invalidate
- * @start: the offset 'from' which to invalidate
- * @end: the offset 'to' which to invalidate (inclusive)
- *
- * This function only removes the unlocked pages, if you want to
- * remove all the pages of one inode, you must call truncate_inode_pages.
+ * truncate_inode_pages - truncate *all* the pages from an offset
+ * @mapping: mapping to truncate
+ * @lstart: offset from which to truncate
  *
- * invalidate_mapping_pages() will not block on IO activity. It will not
- * invalidate pages which are dirty, locked, under writeback or mapped into
- * pagetables.
+ * Called under (and serialised by) inode->i_mutex.
  */
-unsigned long invalidate_mapping_pages(struct address_space *mapping,
-                               pgoff_t start, pgoff_t end)
+void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
+{
+       truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
+}
+EXPORT_SYMBOL(truncate_inode_pages);
+
+unsigned long __invalidate_mapping_pages(struct address_space *mapping,
+                               pgoff_t start, pgoff_t end, bool be_atomic)
 {
        struct pagevec pvec;
        pgoff_t next = start;
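
truncate_inode_pages() is now a thin wrapper passing lend = (loff_t)-1; an all-ones value ends exactly one byte short of a page boundary for any page size, so it satisfies the alignment BUG_ON. Typical call sites look like this (a sketch of common kernel idioms, not lines from this patch):

    /* Drop the whole pagecache when an inode is finally torn down: */
    truncate_inode_pages(&inode->i_data, 0);

    /* Or drop everything past a new, smaller i_size: */
    truncate_inode_pages(inode->i_mapping, inode->i_size);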
@@ -200,14 +277,24 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                        pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
+                       pgoff_t index;
+                       int lock_failed;
 
-                       if (TestSetPageLocked(page)) {
-                               next++;
-                               continue;
-                       }
-                       if (page->index > next)
-                               next = page->index;
+                       lock_failed = !trylock_page(page);
+
+                       /*
+                        * We really shouldn't be looking at the ->index of an
+                        * unlocked page.  But we're not allowed to lock these
+                        * pages.  So we rely upon nobody altering the ->index
+                        * of this (pinned-by-us) page.
+                        */
+                       index = page->index;
+                       if (index > next)
+                               next = index;
                        next++;
+                       if (lock_failed)
+                               continue;
+
                        if (PageDirty(page) || PageWriteback(page))
                                goto unlock;
                        if (page_mapped(page))
@@ -219,17 +306,70 @@ unlock:
                                break;
                }
                pagevec_release(&pvec);
-               cond_resched();
+               if (likely(!be_atomic))
+                       cond_resched();
        }
        return ret;
 }
 
-unsigned long invalidate_inode_pages(struct address_space *mapping)
+/**
+ * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
+ * @mapping: the address_space which holds the pages to invalidate
+ * @start: the offset 'from' which to invalidate
+ * @end: the offset 'to' which to invalidate (inclusive)
+ *
+ * This function only removes the unlocked pages; if you want to
+ * remove all the pages of one inode, you must call truncate_inode_pages.
+ *
+ * invalidate_mapping_pages() will not block on IO activity. It will not
+ * invalidate pages which are dirty, locked, under writeback or mapped into
+ * pagetables.
+ */
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
+                               pgoff_t start, pgoff_t end)
 {
-       return invalidate_mapping_pages(mapping, 0, ~0UL);
+       return __invalidate_mapping_pages(mapping, start, end, false);
+}
+EXPORT_SYMBOL(invalidate_mapping_pages);
+
+/*
+ * This is like invalidate_complete_page(), except it ignores the page's
+ * refcount.  We do this because invalidate_inode_pages2() needs stronger
+ * invalidation guarantees, and cannot afford to leave pages behind because
+ * shrink_page_list() has a temp ref on them, or because they're transiently
+ * sitting in the lru_cache_add() pagevecs.
+ */
+static int
+invalidate_complete_page2(struct address_space *mapping, struct page *page)
+{
+       if (page->mapping != mapping)
+               return 0;
+
+       if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
+               return 0;
+
+       spin_lock_irq(&mapping->tree_lock);
+       if (PageDirty(page))
+               goto failed;
+
+       BUG_ON(PagePrivate(page));
+       __remove_from_page_cache(page);
+       spin_unlock_irq(&mapping->tree_lock);
+       page_cache_release(page);       /* pagecache ref */
+       return 1;
+failed:
+       spin_unlock_irq(&mapping->tree_lock);
+       return 0;
 }
 
-EXPORT_SYMBOL(invalidate_inode_pages);
+static int do_launder_page(struct address_space *mapping, struct page *page)
+{
+       if (!PageDirty(page))
+               return 0;
+       if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
+               return 0;
+       return mapping->a_ops->launder_page(page);
+}
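
do_launder_page() above gives the filesystem one last chance to clean a dirty page before invalidate_inode_pages2_range() drops it: a mapping that cares supplies ->launder_page in its address_space_operations (NFS is the classic user). A minimal sketch, assuming a hypothetical myfs_writepage() helper:

    /* Illustrative only: synchronously write the page back so that the
     * following invalidate_complete_page2() sees it clean. */
    static int myfs_launder_page(struct page *page)
    {
            struct writeback_control wbc = {
                    .sync_mode   = WB_SYNC_ALL,
                    .nr_to_write = 1,
            };
            return myfs_writepage(page, &wbc);      /* made-up helper */
    }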
 
 /**
  * invalidate_inode_pages2_range - remove range of pages from an address_space
@@ -240,7 +380,7 @@ EXPORT_SYMBOL(invalidate_inode_pages);
  * Any pages which are found to be mapped into pagetables are unmapped prior to
  * invalidation.
  *
- * Returns -EIO if any pages could not be invalidated.
+ * Returns -EBUSY if any pages could not be invalidated.
  */
 int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
@@ -249,18 +389,18 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
        pgoff_t next;
        int i;
        int ret = 0;
+       int ret2 = 0;
        int did_range_unmap = 0;
        int wrapped = 0;
 
        pagevec_init(&pvec, 0);
        next = start;
-       while (next <= end && !ret && !wrapped &&
+       while (next <= end && !wrapped &&
                pagevec_lookup(&pvec, mapping, next,
                        min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
-               for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
+               for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index;
-                       int was_dirty;
 
                        lock_page(page);
                        if (page->mapping != mapping) {
@@ -276,7 +416,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                break;
                        }
                        wait_on_page_writeback(page);
-                       while (page_mapped(page)) {
+                       if (page_mapped(page)) {
                                if (!did_range_unmap) {
                                        /*
                                         * Zap the rest of the file in one hit.
@@ -296,12 +436,14 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                          PAGE_CACHE_SIZE, 0);
                                }
                        }
-                       was_dirty = test_clear_page_dirty(page);
-                       if (!invalidate_complete_page(mapping, page)) {
-                               if (was_dirty)
-                                       set_page_dirty(page);
-                               ret = -EIO;
+                       BUG_ON(page_mapped(page));
+                       ret2 = do_launder_page(mapping, page);
+                       if (ret2 == 0) {
+                               if (!invalidate_complete_page2(mapping, page))
+                                       ret2 = -EBUSY;
                        }
+                       if (ret2 < 0)
+                               ret = ret2;
                        unlock_page(page);
                }
                pagevec_release(&pvec);