diff --git a/mm/rmap.c b/mm/rmap.c
index a33e779..b82146e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -20,28 +20,22 @@
 /*
  * Lock ordering in mm:
  *
- * inode->i_sem	(while writing or truncating, not reading or faulting)
- *   inode->i_alloc_sem
- *
- * When a page fault occurs in writing from user to file, down_read
- * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
- * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
- * taken together; in truncation, i_sem is taken outermost.
- *
- * mm->mmap_sem
- *   page->flags PG_locked (lock_page)
- *     mapping->i_mmap_lock
- *       anon_vma->lock
- *         mm->page_table_lock
- *           zone->lru_lock (in mark_page_accessed)
- *           swap_lock (in swap_duplicate, swap_info_get)
- *             mmlist_lock (in mmput, drain_mmlist and others)
- *             mapping->private_lock (in __set_page_dirty_buffers)
- *             inode_lock (in set_page_dirty's __mark_inode_dirty)
- *               sb_lock (within inode_lock in fs/fs-writeback.c)
- *               mapping->tree_lock (widely used, in set_page_dirty,
- *                         in arch-dependent flush_dcache_mmap_lock,
- *                         within inode_lock in __sync_single_inode)
+ * inode->i_mutex	(while writing or truncating, not reading or faulting)
+ *   inode->i_alloc_sem (vmtruncate_range)
+ *     mm->mmap_sem
+ *       page->flags PG_locked (lock_page)
+ *         mapping->i_mmap_lock
+ *           anon_vma->lock
+ *             mm->page_table_lock or pte_lock
+ *               zone->lru_lock (in mark_page_accessed, isolate_lru_page)
+ *               swap_lock (in swap_duplicate, swap_info_get)
+ *                 mmlist_lock (in mmput, drain_mmlist and others)
+ *                 mapping->private_lock (in __set_page_dirty_buffers)
+ *                 inode_lock (in set_page_dirty's __mark_inode_dirty)
+ *                   sb_lock (within inode_lock in fs/fs-writeback.c)
+ *                   mapping->tree_lock (widely used, in set_page_dirty,
+ *                             in arch-dependent flush_dcache_mmap_lock,
+ *                             within inode_lock in __sync_single_inode)
  */
 
 #include <linux/mm.h>
@@ -52,16 +46,16 @@
 #include <linux/init.h>
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
 
 #include <asm/tlbflush.h>
 
-//#define RMAP_DEBUG /* can be enabled only for debugging */
-
-kmem_cache_t *anon_vma_cachep;
+struct kmem_cache *anon_vma_cachep;
 
 static inline void validate_anon_vma(struct vm_area_struct *find_vma)
 {
-#ifdef RMAP_DEBUG
+#ifdef CONFIG_DEBUG_VM
 	struct anon_vma *anon_vma = find_vma->anon_vma;
 	struct vm_area_struct *vma;
 	unsigned int mapcount = 0;
@@ -104,7 +98,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 		spin_lock(&mm->page_table_lock);
 		if (likely(!vma->anon_vma)) {
 			vma->anon_vma = anon_vma;
-			list_add(&vma->anon_vma_node, &anon_vma->head);
+			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
 			allocated = NULL;
 		}
 		spin_unlock(&mm->page_table_lock);
@@ -128,7 +122,7 @@ void __anon_vma_link(struct vm_area_struct *vma)
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	if (anon_vma) {
-		list_add(&vma->anon_vma_node, &anon_vma->head);
+		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
 		validate_anon_vma(vma);
 	}
 }
@@ -139,7 +133,7 @@ void anon_vma_link(struct vm_area_struct *vma)
 
 	if (anon_vma) {
 		spin_lock(&anon_vma->lock);
-		list_add(&vma->anon_vma_node, &anon_vma->head);
+		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
 		validate_anon_vma(vma);
 		spin_unlock(&anon_vma->lock);
 	}
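The anon_vma_prepare() hunk above (at @@ -104,7 +98,7) depends on a pattern worth calling out: the anon_vma is allocated before mm->page_table_lock is taken, the field is rechecked under the lock, and the allocation is discarded if another thread installed one first. Below is a minimal, compilable userspace sketch of that pattern; every name in it (vma_like, anon_like, prepare, table_lock) is an invented stand-in, not a kernel API.

/* Optimistic allocation: allocate outside the lock, recheck under it,
 * and free the allocation if another thread won the race.  This keeps
 * the possibly-sleeping allocation out of the critical section. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct anon_like { int dummy; };

struct vma_like {
	struct anon_like *anon;		/* lazily allocated shared field */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int prepare(struct vma_like *vma)
{
	struct anon_like *allocated = NULL;

	if (!vma->anon) {			/* unlocked fast-path check */
		allocated = malloc(sizeof(*allocated));
		if (!allocated)
			return -1;

		pthread_mutex_lock(&table_lock);
		if (!vma->anon) {		/* recheck under the lock */
			vma->anon = allocated;
			allocated = NULL;	/* ownership transferred */
		}
		pthread_mutex_unlock(&table_lock);

		free(allocated);	/* no-op unless we lost the race */
	}
	return 0;
}

int main(void)
{
	struct vma_like vma = { NULL };

	if (prepare(&vma) == 0)
		printf("anon set: %s\n", vma.anon ? "yes" : "no");
	free(vma.anon);
	return 0;
}

The list_add() to list_add_tail() switch seen in these hunks is separate from that pattern: tail insertion preserves insertion order on the anon_vma list, so a forked child's vmas stay behind its parent's.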
@@ -165,7 +159,8 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 		anon_vma_free(anon_vma);
 }
 
-static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
+			  unsigned long flags)
 {
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
 						SLAB_CTOR_CONSTRUCTOR) {
@@ -188,7 +183,7 @@ void __init anon_vma_init(void)
  */
 static struct anon_vma *page_lock_anon_vma(struct page *page)
 {
-	struct anon_vma *anon_vma = NULL;
+	struct anon_vma *anon_vma;
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
@@ -200,9 +195,16 @@ static struct anon_vma *page_lock_anon_vma(struct page *page)
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 	spin_lock(&anon_vma->lock);
+	return anon_vma;
 out:
 	rcu_read_unlock();
-	return anon_vma;
+	return NULL;
+}
+
+static void page_unlock_anon_vma(struct anon_vma *anon_vma)
+{
+	spin_unlock(&anon_vma->lock);
+	rcu_read_unlock();
 }
 
 /*
@@ -225,7 +227,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 
 /*
  * At what user virtual address is page expected in vma? checking that the
- * page matches the vma: currently only used by unuse_process, on anon pages.
+ * page matches the vma: currently only used on anon pages, by unuse_vma;
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
@@ -234,7 +236,8 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 		    (void *)page->mapping - PAGE_MAPPING_ANON)
 			return -EFAULT;
 	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
-		if (vma->vm_file->f_mapping != page->mapping)
+		if (!vma->vm_file ||
+		    vma->vm_file->f_mapping != page->mapping)
 			return -EFAULT;
 	} else
 		return -EFAULT;
@@ -244,7 +247,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 /*
  * Check that @page is mapped at @address into @mm.
  *
- * On success returns with mapped pte and locked mm->page_table_lock.
+ * On success returns with pte mapped and locked.
  */
 pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 			  unsigned long address, spinlock_t **ptlp)
@@ -289,7 +292,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
 static int page_referenced_one(struct page *page,
-	struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token)
+	struct vm_area_struct *vma, unsigned int *mapcount)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
@@ -310,7 +313,7 @@ static int page_referenced_one(struct page *page,
 	/* Pretend the page is referenced if the task has the
 	   swap token and is in the middle of a page fault. */
-	if (mm != current->mm && !ignore_token && has_swap_token(mm) &&
+	if (mm != current->mm && has_swap_token(mm) &&
 			rwsem_is_locked(&mm->mmap_sem))
 		referenced++;
 
@@ -320,7 +323,7 @@ out:
 	return referenced;
 }
 
-static int page_referenced_anon(struct page *page, int ignore_token)
+static int page_referenced_anon(struct page *page)
 {
 	unsigned int mapcount;
 	struct anon_vma *anon_vma;
@@ -333,12 +336,12 @@ static int page_referenced_anon(struct page *page, int ignore_token)
 
 	mapcount = page_mapcount(page);
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		referenced += page_referenced_one(page, vma, &mapcount,
-							ignore_token);
+		referenced += page_referenced_one(page, vma, &mapcount);
 		if (!mapcount)
 			break;
 	}
-	spin_unlock(&anon_vma->lock);
+
+	page_unlock_anon_vma(anon_vma);
 	return referenced;
 }
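Several of the hunks above decode page->mapping by subtracting PAGE_MAPPING_ANON: for anonymous pages the field does not point to an address_space but carries an anon_vma pointer tagged in its low bit. The toy program below demonstrates the tagging scheme in userspace; MAPPING_ANON and anon_vma_like are invented stand-ins for the kernel's PAGE_MAPPING_ANON and struct anon_vma.

#include <stdint.h>
#include <stdio.h>

#define MAPPING_ANON 0x1UL	/* low tag bit, like PAGE_MAPPING_ANON */

struct anon_vma_like { int dummy; };

int main(void)
{
	/* Any sensibly aligned object has bit 0 of its address clear. */
	static struct anon_vma_like av;
	uintptr_t mapping;

	/* Store side, as in __page_set_anon_rmap(): tag the pointer. */
	mapping = (uintptr_t)&av + MAPPING_ANON;

	/* Load side, as in page_lock_anon_vma(): test, then strip, the tag. */
	if (mapping & MAPPING_ANON) {
		struct anon_vma_like *p =
			(struct anon_vma_like *)(mapping - MAPPING_ANON);
		printf("decoded %p, round-trip ok: %d\n", (void *)p, p == &av);
	}
	return 0;
}

The page_unlock_anon_vma() helper introduced at @@ -200,9 +195,16 pairs the spin_unlock with the rcu_read_unlock that page_lock_anon_vma() now leaves held, so callers can no longer drop the RCU read lock while still dereferencing the anon_vma.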
@@ -353,7 +356,7 @@ static int page_referenced_anon(struct page *page, int ignore_token)
  *
  * This function is only called from page_referenced for object-based pages.
  */
-static int page_referenced_file(struct page *page, int ignore_token)
+static int page_referenced_file(struct page *page)
 {
 	unsigned int mapcount;
 	struct address_space *mapping = page->mapping;
@@ -391,8 +394,7 @@ static int page_referenced_file(struct page *page, int ignore_token)
 				referenced++;
 			break;
 		}
-		referenced += page_referenced_one(page, vma, &mapcount,
-							ignore_token);
+		referenced += page_referenced_one(page, vma, &mapcount);
 		if (!mapcount)
 			break;
 	}
@@ -409,13 +411,10 @@
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked, int ignore_token)
+int page_referenced(struct page *page, int is_locked)
 {
 	int referenced = 0;
 
-	if (!swap_token_default_timeout)
-		ignore_token = 1;
-
 	if (page_test_and_clear_young(page))
 		referenced++;
 
@@ -424,71 +423,179 @@ int page_referenced(struct page *page, int is_locked, int ignore_token)
 
 	if (page_mapped(page) && page->mapping) {
 		if (PageAnon(page))
-			referenced += page_referenced_anon(page, ignore_token);
+			referenced += page_referenced_anon(page);
 		else if (is_locked)
-			referenced += page_referenced_file(page, ignore_token);
+			referenced += page_referenced_file(page);
 		else if (TestSetPageLocked(page))
 			referenced++;
 		else {
 			if (page->mapping)
-				referenced += page_referenced_file(page,
-								ignore_token);
+				referenced += page_referenced_file(page);
 			unlock_page(page);
 		}
 	}
 
 	return referenced;
 }
 
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long address;
+	pte_t *pte;
+	spinlock_t *ptl;
+	int ret = 0;
+
+	address = vma_address(page, vma);
+	if (address == -EFAULT)
+		goto out;
+
+	pte = page_check_address(page, mm, address, &ptl);
+	if (!pte)
+		goto out;
+
+	if (pte_dirty(*pte) || pte_write(*pte)) {
+		pte_t entry;
+
+		flush_cache_page(vma, address, pte_pfn(*pte));
+		entry = ptep_clear_flush(vma, address, pte);
+		entry = pte_wrprotect(entry);
+		entry = pte_mkclean(entry);
+		set_pte_at(mm, address, pte, entry);
+		lazy_mmu_prot_update(entry);
+		ret = 1;
+	}
+
+	pte_unmap_unlock(pte, ptl);
+out:
+	return ret;
+}
+
+static int page_mkclean_file(struct address_space *mapping, struct page *page)
+{
+	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	struct vm_area_struct *vma;
+	struct prio_tree_iter iter;
+	int ret = 0;
+
+	BUG_ON(PageAnon(page));
+
+	spin_lock(&mapping->i_mmap_lock);
+	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+		if (vma->vm_flags & VM_SHARED)
+			ret += page_mkclean_one(page, vma);
+	}
+	spin_unlock(&mapping->i_mmap_lock);
+	return ret;
+}
+
+int page_mkclean(struct page *page)
+{
+	int ret = 0;
+
+	BUG_ON(!PageLocked(page));
+
+	if (page_mapped(page)) {
+		struct address_space *mapping = page_mapping(page);
+		if (mapping)
+			ret = page_mkclean_file(mapping, page);
+		if (page_test_and_clear_dirty(page))
+			ret = 1;
+	}
+
+	return ret;
+}
+
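page_mkclean_one() above is the core of the new page_mkclean() path: when a pte mapping a shared file page is dirty or writable, it write-protects the entry and clears its dirty bit, and reports that it did so; the next store then faults, so redirtying becomes visible again. A compilable toy model of just that flag transition follows. The PTE_* bit values are invented; real pte layouts are architecture-specific, which is why the kernel goes through pte_wrprotect()/pte_mkclean().

#include <stdio.h>

#define PTE_WRITE 0x2u			/* invented bit positions */
#define PTE_DIRTY 0x4u

static unsigned int pte;		/* stand-in for one page table entry */

static int mkclean_one(void)
{
	int ret = 0;

	if (pte & (PTE_DIRTY | PTE_WRITE)) {
		/* wrprotect + mkclean, as in page_mkclean_one() */
		pte &= ~(PTE_DIRTY | PTE_WRITE);
		ret = 1;
	}
	return ret;
}

int main(void)
{
	pte = PTE_WRITE | PTE_DIRTY;
	printf("found dirty/writable: %d, pte now %#x\n", mkclean_one(), pte);
	printf("second pass: %d\n", mkclean_one());	/* already clean */
	return 0;
}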
 /**
- * page_add_anon_rmap - add pte mapping to an anonymous page
+ * page_set_anon_rmap - setup new anonymous rmap
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
- *
- * The caller needs to hold the mm->page_table_lock.
  */
-void page_add_anon_rmap(struct page *page,
+static void __page_set_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
-	if (atomic_inc_and_test(&page->_mapcount)) {
-		struct anon_vma *anon_vma = vma->anon_vma;
+	struct anon_vma *anon_vma = vma->anon_vma;
 
-		BUG_ON(!anon_vma);
-		anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-		page->mapping = (struct address_space *) anon_vma;
+	BUG_ON(!anon_vma);
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	page->mapping = (struct address_space *) anon_vma;
 
-		page->index = linear_page_index(vma, address);
+	page->index = linear_page_index(vma, address);
 
-		inc_page_state(nr_mapped);
-	}
+	/*
+	 * nr_mapped state can be updated without turning off
+	 * interrupts because it is not modified via interrupt.
	 */
+	__inc_zone_page_state(page, NR_ANON_PAGES);
+}
+
+/**
+ * page_add_anon_rmap - add pte mapping to an anonymous page
+ * @page:	the page to add the mapping to
+ * @vma:	the vm area in which the mapping is added
+ * @address:	the user virtual address mapped
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_add_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+	if (atomic_inc_and_test(&page->_mapcount))
+		__page_set_anon_rmap(page, vma, address);
 	/* else checking page index and mapping is racy */
 }
 
+/*
+ * page_add_new_anon_rmap - add pte mapping to a new anonymous page
+ * @page:	the page to add the mapping to
+ * @vma:	the vm area in which the mapping is added
+ * @address:	the user virtual address mapped
+ *
+ * Same as page_add_anon_rmap but must only be called on *new* pages.
+ * This means the inc-and-test can be bypassed.
+ */
+void page_add_new_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
+	__page_set_anon_rmap(page, vma, address);
+}
+
 /**
  * page_add_file_rmap - add pte mapping to a file page
  * @page: the page to add the mapping to
  *
- * The caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
  */
 void page_add_file_rmap(struct page *page)
 {
-	BUG_ON(PageAnon(page));
-	BUG_ON(!pfn_valid(page_to_pfn(page)));
-
 	if (atomic_inc_and_test(&page->_mapcount))
-		inc_page_state(nr_mapped);
+		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
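The add side of rmap accounting hinges on _mapcount starting at -1 for an unmapped page, so atomic_inc_and_test() (true when the new value is 0) fires exactly once, on the first mapping; page_add_new_anon_rmap() can skip the atomic read-modify-write entirely and store 0 because a brand-new page cannot be mapped concurrently. A compilable sketch using C11 atomics as stand-ins for the kernel's atomic_t helpers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int mapcount = -1;   /* like page->_mapcount: -1 == unmapped */

static int inc_and_test(atomic_int *v)
{
	/* atomic_inc_and_test(): true when the incremented value is zero */
	return atomic_fetch_add(v, 1) + 1 == 0;
}

int main(void)
{
	if (inc_and_test(&mapcount))	/* -1 -> 0: first mapping */
		printf("first mapping: initialize rmap state\n");

	if (!inc_and_test(&mapcount))	/* 0 -> 1: nothing to initialize */
		printf("second mapping: fast path\n");

	/* page_add_new_anon_rmap() on a brand-new page just stores 0,
	 * i.e. "exactly one mapping", bypassing the inc-and-test. */
	atomic_store(&mapcount, 0);
	printf("mapcount reset to %d\n", atomic_load(&mapcount));
	return 0;
}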
(%d)\n", page_mapcount(page)); + printk (KERN_EMERG " page pfn = %lx\n", page_to_pfn(page)); + printk (KERN_EMERG " page->flags = %lx\n", page->flags); + printk (KERN_EMERG " page->count = %x\n", page_count(page)); + printk (KERN_EMERG " page->mapping = %p\n", page->mapping); + print_symbol (KERN_EMERG " vma->vm_ops = %s\n", (unsigned long)vma->vm_ops); + if (vma->vm_ops) + print_symbol (KERN_EMERG " vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage); + if (vma->vm_file && vma->vm_file->f_op) + print_symbol (KERN_EMERG " vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap); + BUG(); + } + /* * It would be tidy to reset the PageAnon mapping here, * but that might overwrite a racing page_add_anon_rmap @@ -500,7 +607,8 @@ void page_remove_rmap(struct page *page) */ if (page_test_and_clear_dirty(page)) set_page_dirty(page); - dec_page_state(nr_mapped); + __dec_zone_page_state(page, + PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED); } } @@ -508,7 +616,8 @@ void page_remove_rmap(struct page *page) * Subfunctions of try_to_unmap: try_to_unmap_one called * repeatedly from either try_to_unmap_anon or try_to_unmap_file. */ -static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) +static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, + int migration) { struct mm_struct *mm = vma->vm_mm; unsigned long address; @@ -529,11 +638,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) * If the page is mlock()d, we cannot swap it out. * If it's recently referenced (perhaps page_referenced * skipped over this mm) then we should reactivate it. - * - * Pages belonging to VM_RESERVED regions should not happen here. */ - if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) || - ptep_clear_flush_young(vma, address, pte)) { + if (!migration && ((vma->vm_flags & VM_LOCKED) || + (ptep_clear_flush_young(vma, address, pte)))) { ret = SWAP_FAIL; goto out_unmap; } @@ -551,24 +658,46 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) if (PageAnon(page)) { swp_entry_t entry = { .val = page_private(page) }; - /* - * Store the swap location in the pte. - * See handle_pte_fault() ... - */ - BUG_ON(!PageSwapCache(page)); - swap_duplicate(entry); - if (list_empty(&mm->mmlist)) { - spin_lock(&mmlist_lock); - list_add(&mm->mmlist, &init_mm.mmlist); - spin_unlock(&mmlist_lock); + + if (PageSwapCache(page)) { + /* + * Store the swap location in the pte. + * See handle_pte_fault() ... + */ + swap_duplicate(entry); + if (list_empty(&mm->mmlist)) { + spin_lock(&mmlist_lock); + if (list_empty(&mm->mmlist)) + list_add(&mm->mmlist, &init_mm.mmlist); + spin_unlock(&mmlist_lock); + } + dec_mm_counter(mm, anon_rss); +#ifdef CONFIG_MIGRATION + } else { + /* + * Store the pfn of the page in a special migration + * pte. do_swap_page() will wait until the migration + * pte is removed and then restart fault handling. 
@@ -551,24 +658,46 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
 	if (PageAnon(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
-		/*
-		 * Store the swap location in the pte.
-		 * See handle_pte_fault() ...
-		 */
-		BUG_ON(!PageSwapCache(page));
-		swap_duplicate(entry);
-		if (list_empty(&mm->mmlist)) {
-			spin_lock(&mmlist_lock);
-			list_add(&mm->mmlist, &init_mm.mmlist);
-			spin_unlock(&mmlist_lock);
+
+		if (PageSwapCache(page)) {
+			/*
+			 * Store the swap location in the pte.
+			 * See handle_pte_fault() ...
+			 */
+			swap_duplicate(entry);
+			if (list_empty(&mm->mmlist)) {
+				spin_lock(&mmlist_lock);
+				if (list_empty(&mm->mmlist))
+					list_add(&mm->mmlist, &init_mm.mmlist);
+				spin_unlock(&mmlist_lock);
+			}
+			dec_mm_counter(mm, anon_rss);
+#ifdef CONFIG_MIGRATION
+		} else {
+			/*
+			 * Store the pfn of the page in a special migration
+			 * pte. do_swap_page() will wait until the migration
+			 * pte is removed and then restart fault handling.
+			 */
+			BUG_ON(!migration);
+			entry = make_migration_entry(page, pte_write(pteval));
+#endif
 		}
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
-		dec_mm_counter(mm, anon_rss);
 	} else
+#ifdef CONFIG_MIGRATION
+	if (migration) {
+		/* Establish migration entry for a file page */
+		swp_entry_t entry;
+		entry = make_migration_entry(page, pte_write(pteval));
+		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+	} else
+#endif
 		dec_mm_counter(mm, file_rss);
 
-	page_remove_rmap(page);
+
+	page_remove_rmap(page, vma);
 	page_cache_release(page);
 
 out_unmap:
@@ -612,7 +741,6 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	struct page *page;
 	unsigned long address;
 	unsigned long end;
-	unsigned long pfn;
 
 	address = (vma->vm_start + cursor) & CLUSTER_MASK;
 	end = address + CLUSTER_SIZE;
@@ -641,21 +769,14 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	for (; address < end; pte++, address += PAGE_SIZE) {
 		if (!pte_present(*pte))
 			continue;
-
-		pfn = pte_pfn(*pte);
-		if (unlikely(!pfn_valid(pfn))) {
-			print_bad_pte(vma, *pte, address);
-			continue;
-		}
-
-		page = pfn_to_page(pfn);
-		BUG_ON(PageAnon(page));
+		page = vm_normal_page(vma, address, *pte);
+		BUG_ON(!page || PageAnon(page));
 
 		if (ptep_clear_flush_young(vma, address, pte))
 			continue;
 
 		/* Nuke the page table entry. */
-		flush_cache_page(vma, address, pfn);
+		flush_cache_page(vma, address, pte_pfn(*pte));
 		pteval = ptep_clear_flush(vma, address, pte);
 
 		/* If nonlinear, store the file page offset in the pte. */
@@ -666,7 +787,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
 		if (pte_dirty(pteval))
 			set_page_dirty(page);
 
-		page_remove_rmap(page);
+		page_remove_rmap(page, vma);
 		page_cache_release(page);
 		dec_mm_counter(mm, file_rss);
 		(*mapcount)--;
@@ -674,7 +795,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 }
 
-static int try_to_unmap_anon(struct page *page)
+static int try_to_unmap_anon(struct page *page, int migration)
 {
 	struct anon_vma *anon_vma;
 	struct vm_area_struct *vma;
@@ -685,11 +806,12 @@ static int try_to_unmap_anon(struct page *page)
 		return ret;
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		ret = try_to_unmap_one(page, vma);
+		ret = try_to_unmap_one(page, vma, migration);
 		if (ret == SWAP_FAIL || !page_mapped(page))
 			break;
 	}
-	spin_unlock(&anon_vma->lock);
+
+	page_unlock_anon_vma(anon_vma);
 	return ret;
 }
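The migration branches above rely on make_migration_entry() producing a special non-present entry that records the page frame number plus whether the original pte was writable; do_swap_page() recognizes such an entry and waits until migration finishes before restarting the fault. A rough userspace sketch of that encoding follows; the field layout (one flag bit, pfn shifted above it) is invented, since the real swp_entry_t format is per-architecture.

#include <stdint.h>
#include <stdio.h>

#define MIG_WRITE	0x1ull	/* invented: "pte was writable" flag */
#define MIG_PFN_SHIFT	1	/* invented: pfn lives above the flag */

static uint64_t make_entry(uint64_t pfn, int writable)
{
	return (pfn << MIG_PFN_SHIFT) | (writable ? MIG_WRITE : 0);
}

int main(void)
{
	uint64_t e = make_entry(0x1234, 1);

	printf("pfn=%#llx writable=%d\n",
	       (unsigned long long)(e >> MIG_PFN_SHIFT),
	       (int)(e & MIG_WRITE));
	return 0;
}

Note also the mmlist handling in the same hunk: the emptiness test is now repeated under mmlist_lock, the usual check-lock-recheck idiom, so two racing unmappers cannot both add the mm to init_mm.mmlist.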
@@ -702,7 +824,7 @@ static int try_to_unmap_anon(struct page *page)
  *
  * This function is only called from try_to_unmap for object-based pages.
  */
-static int try_to_unmap_file(struct page *page)
+static int try_to_unmap_file(struct page *page, int migration)
 {
 	struct address_space *mapping = page->mapping;
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -716,7 +838,7 @@ static int try_to_unmap_file(struct page *page)
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-		ret = try_to_unmap_one(page, vma);
+		ret = try_to_unmap_one(page, vma, migration);
 		if (ret == SWAP_FAIL || !page_mapped(page))
 			goto out;
 	}
@@ -726,7 +848,7 @@ static int try_to_unmap_file(struct page *page)
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
+		if ((vma->vm_flags & VM_LOCKED) && !migration)
 			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
@@ -760,7 +882,7 @@ static int try_to_unmap_file(struct page *page)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
+			if ((vma->vm_flags & VM_LOCKED) && !migration)
 				continue;
 			cursor = (unsigned long) vma->vm_private_data;
 			while ( cursor < max_nl_cursor &&
@@ -782,11 +904,8 @@ static int try_to_unmap_file(struct page *page)
 	 * in locked vmas).  Reset cursor on all unreserved nonlinear
 	 * vmas, now forgetting on which ones it had fallen behind.
 	 */
-	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
-						shared.vm_set.list) {
-		if (!(vma->vm_flags & VM_RESERVED))
-			vma->vm_private_data = NULL;
-	}
+	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
+		vma->vm_private_data = NULL;
 out:
 	spin_unlock(&mapping->i_mmap_lock);
 	return ret;
@@ -804,16 +923,16 @@ out:
  * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
  */
-int try_to_unmap(struct page *page)
+int try_to_unmap(struct page *page, int migration)
 {
 	int ret;
 
 	BUG_ON(!PageLocked(page));
 
 	if (PageAnon(page))
-		ret = try_to_unmap_anon(page);
+		ret = try_to_unmap_anon(page, migration);
 	else
-		ret = try_to_unmap_file(page);
+		ret = try_to_unmap_file(page, migration);
 
 	if (!page_mapped(page))
 		ret = SWAP_SUCCESS;
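After this patch the externally visible contract of try_to_unmap() is: callers pass a migration flag that is threaded down to try_to_unmap_one(), mlocked vmas are skipped only when not migrating, and the result collapses to SWAP_SUCCESS as soon as no mapping remains, with SWAP_AGAIN and SWAP_FAIL reporting partial progress and hard failure. The compilable sketch below shows only that dispatch-and-collapse shape; the helpers are stand-ins, not the kernel functions.

#include <stdio.h>

enum { SWAP_SUCCESS, SWAP_AGAIN, SWAP_FAIL };

static int mapped = 1;			/* stand-in for page_mapped() */

static int unmap_anon(int migration)
{
	(void)migration;		/* a real version passes it down */
	mapped = 0;
	return SWAP_AGAIN;
}

static int unmap_file(int migration)
{
	(void)migration;
	mapped = 0;
	return SWAP_AGAIN;
}

static int try_to_unmap_sketch(int is_anon, int migration)
{
	int ret = is_anon ? unmap_anon(migration) : unmap_file(migration);

	if (!mapped)			/* every pte gone: report success */
		ret = SWAP_SUCCESS;
	return ret;
}

int main(void)
{
	printf("ret=%d (0 == SWAP_SUCCESS)\n", try_to_unmap_sketch(1, 0));
	return 0;
}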