[PATCH] r/o bind mounts: unlink: monitor i_nlink
index 491ac35..e2155d7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
 /*
  * Lock ordering in mm:
  *
- * inode->i_sem        (while writing or truncating, not reading or faulting)
+ * inode->i_mutex      (while writing or truncating, not reading or faulting)
  *   inode->i_alloc_sem
  *
  * When a page fault occurs in writing from user to file, down_read
- * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
- * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
- * taken together; in truncation, i_sem is taken outermost.
+ * of mmap_sem nests within i_mutex; in sys_msync, i_mutex nests within
+ * down_read of mmap_sem; i_mutex and down_write of mmap_sem are never
+ * taken together; in truncation, i_mutex is taken outermost.
  *
  * mm->mmap_sem
  *   page->flags PG_locked (lock_page)
  *     mapping->i_mmap_lock
  *       anon_vma->lock
  *         mm->page_table_lock or pte_lock
- *           zone->lru_lock (in mark_page_accessed)
+ *           zone->lru_lock (in mark_page_accessed, isolate_lru_page)
  *           swap_lock (in swap_duplicate, swap_info_get)
  *             mmlist_lock (in mmput, drain_mmlist and others)
  *             mapping->private_lock (in __set_page_dirty_buffers)
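
The nesting documented above is the one the file-backed paths below follow: the page lock is taken before mapping->i_mmap_lock, and the pte lock (taken inside page_check_address()) is innermost. A minimal, illustrative sketch of that order for a single vma, assuming the caller already holds the page lock (the helper name is hypothetical):

/*
 * Illustrative only: lock nesting for a file-backed page, matching the
 * ordering comment above.  Assumes the page lock is already held.
 */
static void lock_order_sketch(struct page *page, struct vm_area_struct *vma,
			      unsigned long address)
{
	struct address_space *mapping = page->mapping;
	spinlock_t *ptl;
	pte_t *pte;

	spin_lock(&mapping->i_mmap_lock);	/* nests inside the page lock */
	pte = page_check_address(page, vma->vm_mm, address, &ptl);
	if (pte)				/* pte lock is taken innermost */
		pte_unmap_unlock(pte, ptl);
	spin_unlock(&mapping->i_mmap_lock);
}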
 #include <linux/init.h>
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
+#include <linux/module.h>
 
 #include <asm/tlbflush.h>
 
-//#define RMAP_DEBUG /* can be enabled only for debugging */
-
-kmem_cache_t *anon_vma_cachep;
+struct kmem_cache *anon_vma_cachep;
 
 static inline void validate_anon_vma(struct vm_area_struct *find_vma)
 {
-#ifdef RMAP_DEBUG
+#ifdef CONFIG_DEBUG_VM
        struct anon_vma *anon_vma = find_vma->anon_vma;
        struct vm_area_struct *vma;
        unsigned int mapcount = 0;
@@ -104,7 +103,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
-                       list_add(&vma->anon_vma_node, &anon_vma->head);
+                       list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                        allocated = NULL;
                }
                spin_unlock(&mm->page_table_lock);
@@ -128,7 +127,7 @@ void __anon_vma_link(struct vm_area_struct *vma)
        struct anon_vma *anon_vma = vma->anon_vma;
 
        if (anon_vma) {
-               list_add(&vma->anon_vma_node, &anon_vma->head);
+               list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                validate_anon_vma(vma);
        }
 }
@@ -139,7 +138,7 @@ void anon_vma_link(struct vm_area_struct *vma)
 
        if (anon_vma) {
                spin_lock(&anon_vma->lock);
-               list_add(&vma->anon_vma_node, &anon_vma->head);
+               list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                validate_anon_vma(vma);
                spin_unlock(&anon_vma->lock);
        }
@@ -165,7 +164,8 @@ void anon_vma_unlink(struct vm_area_struct *vma)
                anon_vma_free(anon_vma);
 }
 
-static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
+                         unsigned long flags)
 {
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
                                                SLAB_CTOR_CONSTRUCTOR) {
@@ -434,6 +434,95 @@ int page_referenced(struct page *page, int is_locked)
        return referenced;
 }
 
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long address;
+       pte_t *pte, entry;
+       spinlock_t *ptl;
+       int ret = 0;
+
+       address = vma_address(page, vma);
+       if (address == -EFAULT)
+               goto out;
+
+       pte = page_check_address(page, mm, address, &ptl);
+       if (!pte)
+               goto out;
+
+       if (!pte_dirty(*pte) && !pte_write(*pte))
+               goto unlock;
+
+       entry = ptep_get_and_clear(mm, address, pte);
+       entry = pte_mkclean(entry);
+       entry = pte_wrprotect(entry);
+       ptep_establish(vma, address, pte, entry);
+       lazy_mmu_prot_update(entry);
+       ret = 1;
+
+unlock:
+       pte_unmap_unlock(pte, ptl);
+out:
+       return ret;
+}
+
+static int page_mkclean_file(struct address_space *mapping, struct page *page)
+{
+       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       struct vm_area_struct *vma;
+       struct prio_tree_iter iter;
+       int ret = 0;
+
+       BUG_ON(PageAnon(page));
+
+       spin_lock(&mapping->i_mmap_lock);
+       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+               if (vma->vm_flags & VM_SHARED)
+                       ret += page_mkclean_one(page, vma);
+       }
+       spin_unlock(&mapping->i_mmap_lock);
+       return ret;
+}
+
+int page_mkclean(struct page *page)
+{
+       int ret = 0;
+
+       BUG_ON(!PageLocked(page));
+
+       if (page_mapped(page)) {
+               struct address_space *mapping = page_mapping(page);
+               if (mapping)
+                       ret = page_mkclean_file(mapping, page);
+       }
+
+       return ret;
+}
+
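
page_mkclean() above is the new entry point: for a locked, file-backed page it write-protects and cleans every pte in the VM_SHARED mappings, so the next store faults and can be tracked. A minimal usage sketch, assuming a hypothetical dirty-tracking caller that holds a reference on the page:

/*
 * Illustrative only: transferring pte dirty state back to the page
 * before write-protecting it.  "page" is assumed to be file backed.
 */
static void mkclean_sketch(struct page *page)
{
	lock_page(page);		/* page_mkclean() requires PageLocked */
	if (page_mkclean(page))		/* some pte was dirty or writable */
		set_page_dirty(page);	/* keep the dirtiness we just cleared */
	unlock_page(page);
}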
+/**
+ * __page_set_anon_rmap - setup new anonymous rmap
+ * @page:      the page to add the mapping to
+ * @vma:       the vm area in which the mapping is added
+ * @address:   the user virtual address mapped
+ */
+static void __page_set_anon_rmap(struct page *page,
+       struct vm_area_struct *vma, unsigned long address)
+{
+       struct anon_vma *anon_vma = vma->anon_vma;
+
+       BUG_ON(!anon_vma);
+       anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+       page->mapping = (struct address_space *) anon_vma;
+
+       page->index = linear_page_index(vma, address);
+
+       /*
+        * The per-zone NR_ANON_PAGES counter can be updated without
+        * disabling interrupts because it is never modified from
+        * interrupt context.
+        */
+       __inc_zone_page_state(page, NR_ANON_PAGES);
+}
+
 /**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page:      the page to add the mapping to
@@ -445,20 +534,27 @@ int page_referenced(struct page *page, int is_locked)
 void page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
 {
-       if (atomic_inc_and_test(&page->_mapcount)) {
-               struct anon_vma *anon_vma = vma->anon_vma;
-
-               BUG_ON(!anon_vma);
-               anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-               page->mapping = (struct address_space *) anon_vma;
-
-               page->index = linear_page_index(vma, address);
-
-               inc_page_state(nr_mapped);
-       }
+       if (atomic_inc_and_test(&page->_mapcount))
+               __page_set_anon_rmap(page, vma, address);
        /* else checking page index and mapping is racy */
 }
 
+/**
+ * page_add_new_anon_rmap - add pte mapping to a new anonymous page
+ * @page:      the page to add the mapping to
+ * @vma:       the vm area in which the mapping is added
+ * @address:   the user virtual address mapped
+ *
+ * Same as page_add_anon_rmap but must only be called on *new* pages.
+ * This means the inc-and-test can be bypassed.
+ */
+void page_add_new_anon_rmap(struct page *page,
+       struct vm_area_struct *vma, unsigned long address)
+{
+       atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
+       __page_set_anon_rmap(page, vma, address);
+}
+
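
page_add_new_anon_rmap() above is meant for fault paths that have just allocated a page nobody else can see, which is why the inc-and-test of page_add_anon_rmap() can be skipped. A sketch of the intended call pattern, loosely modelled on do_anonymous_page() and assuming the pte lock is already held (the helper name is hypothetical):

/*
 * Illustrative only: wiring up a freshly allocated anonymous page.
 * Assumes "page" was just allocated and zeroed, and that page_table
 * points at the pte slot with its lock held.
 */
static void new_anon_page_sketch(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *page_table, struct page *page)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	inc_mm_counter(mm, anon_rss);
	lru_cache_add_active(page);			/* new page goes on the active LRU */
	page_add_new_anon_rmap(page, vma, address);	/* no atomic inc-and-test */
	set_pte_at(mm, address, page_table, entry);
}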
 /**
  * page_add_file_rmap - add pte mapping to a file page
  * @page: the page to add the mapping to
@@ -467,11 +563,8 @@ void page_add_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
-       BUG_ON(PageAnon(page));
-       BUG_ON(!pfn_valid(page_to_pfn(page)));
-
        if (atomic_inc_and_test(&page->_mapcount))
-               inc_page_state(nr_mapped);
+               __inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
 /**
@@ -483,6 +576,14 @@ void page_add_file_rmap(struct page *page)
 void page_remove_rmap(struct page *page)
 {
        if (atomic_add_negative(-1, &page->_mapcount)) {
+#ifdef CONFIG_DEBUG_VM
+               if (unlikely(page_mapcount(page) < 0)) {
+                       printk(KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
+                       printk(KERN_EMERG "  page->flags = %lx\n", page->flags);
+                       printk(KERN_EMERG "  page->count = %x\n", page_count(page));
+                       printk(KERN_EMERG "  page->mapping = %p\n", page->mapping);
+               }
+#endif
                BUG_ON(page_mapcount(page) < 0);
                /*
                 * It would be tidy to reset the PageAnon mapping here,
@@ -495,7 +596,8 @@ void page_remove_rmap(struct page *page)
                 */
                if (page_test_and_clear_dirty(page))
                        set_page_dirty(page);
-               dec_page_state(nr_mapped);
+               __dec_zone_page_state(page,
+                               PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
        }
 }
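
With this change the rmap statistics move from the global page_state counters to the per-zone vm_stat counters NR_ANON_PAGES and NR_FILE_MAPPED, updated with the non-IRQ-safe __inc/__dec variants. A short sketch of how the per-zone values are read back, assuming the standard vmstat helpers:

/*
 * Illustrative only: the per-zone counters updated above are summed
 * across all zones by global_page_state() (see <linux/vmstat.h>)
 * when they are reported.
 */
static unsigned long mapped_pages_sketch(void)
{
	return global_page_state(NR_FILE_MAPPED) +
	       global_page_state(NR_ANON_PAGES);
}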
 
@@ -503,7 +605,8 @@ void page_remove_rmap(struct page *page)
  * Subfunctions of try_to_unmap: try_to_unmap_one called
  * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
  */
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
+static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+                               int migration)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
@@ -525,8 +628,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
         * If it's recently referenced (perhaps page_referenced
         * skipped over this mm) then we should reactivate it.
         */
-       if ((vma->vm_flags & VM_LOCKED) ||
-                       ptep_clear_flush_young(vma, address, pte)) {
+       if (!migration && ((vma->vm_flags & VM_LOCKED) ||
+                       (ptep_clear_flush_young(vma, address, pte)))) {
                ret = SWAP_FAIL;
                goto out_unmap;
        }
@@ -544,24 +647,45 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
 
        if (PageAnon(page)) {
                swp_entry_t entry = { .val = page_private(page) };
-               /*
-                * Store the swap location in the pte.
-                * See handle_pte_fault() ...
-                */
-               BUG_ON(!PageSwapCache(page));
-               swap_duplicate(entry);
-               if (list_empty(&mm->mmlist)) {
-                       spin_lock(&mmlist_lock);
-                       if (list_empty(&mm->mmlist))
-                               list_add(&mm->mmlist, &init_mm.mmlist);
-                       spin_unlock(&mmlist_lock);
+
+               if (PageSwapCache(page)) {
+                       /*
+                        * Store the swap location in the pte.
+                        * See handle_pte_fault() ...
+                        */
+                       swap_duplicate(entry);
+                       if (list_empty(&mm->mmlist)) {
+                               spin_lock(&mmlist_lock);
+                               if (list_empty(&mm->mmlist))
+                                       list_add(&mm->mmlist, &init_mm.mmlist);
+                               spin_unlock(&mmlist_lock);
+                       }
+                       dec_mm_counter(mm, anon_rss);
+#ifdef CONFIG_MIGRATION
+               } else {
+                       /*
+                        * Store the pfn of the page in a special migration
+                        * pte. do_swap_page() will wait until the migration
+                        * pte is removed and then restart fault handling.
+                        */
+                       BUG_ON(!migration);
+                       entry = make_migration_entry(page, pte_write(pteval));
+#endif
                }
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
                BUG_ON(pte_file(*pte));
-               dec_mm_counter(mm, anon_rss);
        } else
+#ifdef CONFIG_MIGRATION
+       if (migration) {
+               /* Establish migration entry for a file page */
+               swp_entry_t entry;
+               entry = make_migration_entry(page, pte_write(pteval));
+               set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+       } else
+#endif
                dec_mm_counter(mm, file_rss);
 
+
        page_remove_rmap(page);
        page_cache_release(page);
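
With the new migration argument, try_to_unmap() installs special migration entries instead of swap entries; a faulting thread that hits one goes through do_swap_page(), sees is_migration_entry() and sleeps in migration_entry_wait() until the entry has been replaced. A simplified outline of the unmap half of a migration attempt, assuming a locked page and the CONFIG_MIGRATION helpers in mm/migrate.c (remove_migration_ptes() is internal to that file and shown here only for illustration):

/*
 * Illustrative only: the shape of the unmap side of page migration.
 * "page" is assumed to be locked; "newpage" is the destination.
 */
static int migrate_unmap_sketch(struct page *page, struct page *newpage)
{
	int rc = -EAGAIN;

	/* Replace every pte mapping "page" with a migration entry. */
	try_to_unmap(page, 1);

	if (!page_mapped(page)) {
		/* ... copy contents and page state from page to newpage ... */
		rc = 0;
	}

	/*
	 * Turn the migration entries back into real ptes: at newpage on
	 * success, or at the original page if the unmap was incomplete.
	 */
	remove_migration_ptes(page, rc ? page : newpage);
	return rc;
}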
 
@@ -641,7 +765,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
                        continue;
 
                /* Nuke the page table entry. */
-               flush_cache_page(vma, address, pfn);
+               flush_cache_page(vma, address, pte_pfn(*pte));
                pteval = ptep_clear_flush(vma, address, pte);
 
                /* If nonlinear, store the file page offset in the pte. */
@@ -660,7 +784,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
        pte_unmap_unlock(pte - 1, ptl);
 }
 
-static int try_to_unmap_anon(struct page *page)
+static int try_to_unmap_anon(struct page *page, int migration)
 {
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
@@ -671,7 +795,7 @@ static int try_to_unmap_anon(struct page *page)
                return ret;
 
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-               ret = try_to_unmap_one(page, vma);
+               ret = try_to_unmap_one(page, vma, migration);
                if (ret == SWAP_FAIL || !page_mapped(page))
                        break;
        }
@@ -688,7 +812,7 @@ static int try_to_unmap_anon(struct page *page)
  *
  * This function is only called from try_to_unmap for object-based pages.
  */
-static int try_to_unmap_file(struct page *page)
+static int try_to_unmap_file(struct page *page, int migration)
 {
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -702,7 +826,7 @@ static int try_to_unmap_file(struct page *page)
 
        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-               ret = try_to_unmap_one(page, vma);
+               ret = try_to_unmap_one(page, vma, migration);
                if (ret == SWAP_FAIL || !page_mapped(page))
                        goto out;
        }
@@ -712,7 +836,7 @@ static int try_to_unmap_file(struct page *page)
 
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
-               if (vma->vm_flags & VM_LOCKED)
+               if ((vma->vm_flags & VM_LOCKED) && !migration)
                        continue;
                cursor = (unsigned long) vma->vm_private_data;
                if (cursor > max_nl_cursor)
@@ -746,7 +870,7 @@ static int try_to_unmap_file(struct page *page)
        do {
                list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
-                       if (vma->vm_flags & VM_LOCKED)
+                       if ((vma->vm_flags & VM_LOCKED) && !migration)
                                continue;
                        cursor = (unsigned long) vma->vm_private_data;
                        while ( cursor < max_nl_cursor &&
@@ -787,16 +911,16 @@ out:
  * SWAP_AGAIN  - we missed a mapping, try again later
  * SWAP_FAIL   - the page is unswappable
  */
-int try_to_unmap(struct page *page)
+int try_to_unmap(struct page *page, int migration)
 {
        int ret;
 
        BUG_ON(!PageLocked(page));
 
        if (PageAnon(page))
-               ret = try_to_unmap_anon(page);
+               ret = try_to_unmap_anon(page, migration);
        else
-               ret = try_to_unmap_file(page);
+               ret = try_to_unmap_file(page, migration);
 
        if (!page_mapped(page))
                ret = SWAP_SUCCESS;