X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fmigrate.c;h=4e0eccca5e265ac19bc507a171a2f720d27f8c21;hb=33327948782bcef89c78eb47af86b6a2df9fd4a5;hp=6196f45c52634b0e16b6cfccf72d423538cf0bb1;hpb=3d99cfb5f46191fc68f1343feeb2cf835001f7d7;p=safe%2Fjmp%2Flinux-2.6

diff --git a/mm/migrate.c b/mm/migrate.c
index 6196f45..4e0eccc 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -19,6 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/mm_inline.h>
+#include <linux/nsproxy.h>
 #include <linux/pagevec.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
@@ -28,6 +29,7 @@
 #include <linux/mempolicy.h>
 #include <linux/vmalloc.h>
 #include <linux/security.h>
+#include <linux/memcontrol.h>
 
 #include "internal.h"
 
@@ -49,9 +51,8 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
 		struct zone *zone = page_zone(page);
 
 		spin_lock_irq(&zone->lru_lock);
-		if (PageLRU(page)) {
+		if (PageLRU(page) && get_page_unless_zero(page)) {
 			ret = 0;
-			get_page(page);
 			ClearPageLRU(page);
 			if (PageActive(page))
 				del_page_from_active_list(zone, page);
@@ -115,11 +116,6 @@ int putback_lru_pages(struct list_head *l)
 	return count;
 }
 
-static inline int is_swap_pte(pte_t pte)
-{
-	return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
-}
-
 /*
  * Restore a potential migration pte to a working pte entry
 */
@@ -168,10 +164,25 @@ static void remove_migration_pte(struct vm_area_struct *vma,
 	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
 		goto out;
 
+	/*
+	 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
+	 * Failure is not an option here: we're now expected to remove every
+	 * migration pte, and will cause crashes otherwise. Normally this
+	 * is not an issue: mem_cgroup_prepare_migration bumped up the old
+	 * page_cgroup count for safety, that's now attached to the new page,
+	 * so this charge should just be another increment of the count,
+	 * to keep in balance with rmap.c's mem_cgroup_uncharging. But if
+	 * there's been a force_empty, those reference counts may no longer
+	 * be reliable, and this charge can actually fail: oh well, we don't
+	 * make the situation any worse by proceeding as if it had succeeded.
+	 */
+	mem_cgroup_charge(new, mm, GFP_ATOMIC);
+
 	get_page(new);
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 	if (is_write_migration_entry(entry))
 		pte = pte_mkwrite(pte);
+	flush_cache_page(vma, addr, pte_pfn(pte));
 	set_pte_at(mm, addr, ptep, pte);
 
 	if (PageAnon(new))
@@ -181,7 +192,6 @@ static void remove_migration_pte(struct vm_area_struct *vma,
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, addr, pte);
-	lazy_mmu_prot_update(pte);
 
 out:
 	pte_unmap_unlock(ptep, ptl);
@@ -294,10 +304,10 @@ out:
 static int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page)
 {
-	struct page **radix_pointer;
+	void **pslot;
 
 	if (!mapping) {
-		/* Anonymous page */
+		/* Anonymous page without mapping */
 		if (page_count(page) != 1)
 			return -EAGAIN;
 		return 0;
@@ -305,12 +315,11 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 
 	write_lock_irq(&mapping->tree_lock);
 
-	radix_pointer = (struct page **)radix_tree_lookup_slot(
-						&mapping->page_tree,
-						page_index(page));
+	pslot = radix_tree_lookup_slot(&mapping->page_tree,
+					page_index(page));
 
 	if (page_count(page) != 2 + !!PagePrivate(page) ||
-			*radix_pointer != page) {
+			(struct page *)radix_tree_deref_slot(pslot) != page) {
 		write_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
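
The two hunks above convert migrate_page_move_mapping() from caching a raw struct page ** into the page tree to the slot API, which cooperates with the RCU-aware radix tree implementation. As a rough sketch of the pattern (the helper name replace_page_slot() and its error handling are illustrative only, not from mm/migrate.c; radix_tree_lookup_slot(), radix_tree_deref_slot() and radix_tree_replace_slot() are the real <linux/radix-tree.h> interfaces), a caller holding mapping->tree_lock for writing would do something like:

	/*
	 * Sketch only: swap 'old' for 'new' in a mapping's page tree.
	 * Caller must hold mapping->tree_lock for writing.
	 */
	static int replace_page_slot(struct address_space *mapping,
				     struct page *old, struct page *new)
	{
		void **pslot;

		pslot = radix_tree_lookup_slot(&mapping->page_tree,
						page_index(old));
		if (!pslot || radix_tree_deref_slot(pslot) != old)
			return -EAGAIN;	/* raced with someone else */

		radix_tree_replace_slot(pslot, new);	/* new page now visible */
		return 0;
	}

Looking the slot up once and then dereferencing/replacing through it avoids a second tree descent and keeps the visibility change to a single pointer store, which is exactly what the real code relies on under tree_lock.
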
@@ -318,7 +327,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	/*
 	 * Now we know that no one else is looking at the page.
 	 */
-	get_page(newpage);
+	get_page(newpage);	/* add cache reference */
 #ifdef CONFIG_SWAP
 	if (PageSwapCache(page)) {
 		SetPageSwapCache(newpage);
@@ -326,8 +335,27 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	}
 #endif
 
-	*radix_pointer = newpage;
+	radix_tree_replace_slot(pslot, newpage);
+
+	/*
+	 * Drop cache reference from old page.
+	 * We know this isn't the last reference.
+	 */
 	__put_page(page);
+
+	/*
+	 * If moved to a different zone then also account
+	 * the page for that zone. Other VM counters will be
+	 * taken care of when we establish references to the
+	 * new page and drop references to the old page.
+	 *
+	 * Note that anonymous pages are accounted for
+	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
+	 * are mapped to swap space.
+	 */
+	__dec_zone_page_state(page, NR_FILE_PAGES);
+	__inc_zone_page_state(newpage, NR_FILE_PAGES);
+
 	write_unlock_irq(&mapping->tree_lock);
 
 	return 0;
@@ -409,6 +437,7 @@ int migrate_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(migrate_page);
 
+#ifdef CONFIG_BLOCK
 /*
  * Migration function for pages with buffers. This function can only be used
  * if the underlying filesystem guarantees that no other references to "page"
@@ -466,6 +495,7 @@ int buffer_migrate_page(struct address_space *mapping,
 	return 0;
 }
 EXPORT_SYMBOL(buffer_migrate_page);
+#endif
 
 /*
  * Writeback a page to clean the dirty state
@@ -525,7 +555,7 @@ static int fallback_migrate_page(struct address_space *mapping,
 	 * Buffers may be managed in a filesystem specific way.
 	 * We must have no buffers or drop them.
 	 */
-	if (page_has_buffers(page) &&
+	if (PagePrivate(page) &&
 	    !try_to_release_page(page, GFP_KERNEL))
 		return -EAGAIN;
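
For orientation between these hunks: migratepage, whose buffer-head implementation is now guarded by CONFIG_BLOCK above, is an address_space operation, and move_to_new_page() in the next hunk dispatches to it before falling back to fallback_migrate_page(). A filesystem whose pages carry buffer_heads opts in by pointing the method at buffer_migrate_page(); the wiring below is a hypothetical sketch (example_aops, example_readpage and example_writepage are placeholders, not kernel symbols):

	static const struct address_space_operations example_aops = {
		.readpage	= example_readpage,	/* placeholder fs callbacks */
		.writepage	= example_writepage,
		.migratepage	= buffer_migrate_page,	/* pages use buffer_heads */
	};

A filesystem with no private page state could instead set .migratepage = migrate_page; leaving the method NULL sends the page through the fallback path patched above.
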
@@ -572,9 +602,10 @@ static int move_to_new_page(struct page *newpage, struct page *page)
 	else
 		rc = fallback_migrate_page(mapping, newpage, page);
 
-	if (!rc)
+	if (!rc) {
+		mem_cgroup_page_migration(page, newpage);
 		remove_migration_ptes(page, newpage);
-	else
+	} else
 		newpage->mapping = NULL;
 
 	unlock_page(newpage);
@@ -592,6 +623,8 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	int rc = 0;
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
+	int rcu_locked = 0;
+	int charge = 0;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -612,18 +645,64 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 			goto unlock;
 		wait_on_page_writeback(page);
 	}
+	/*
+	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
+	 * we cannot notice that anon_vma is freed while we migrate a page.
+	 * This rcu_read_lock() delays freeing the anon_vma pointer until the
+	 * end of migration. File cache pages are no problem because of
+	 * page_lock(): file caches may use write_page() or lock_page() in
+	 * migration, so just take care of anon pages here.
+	 */
+	if (PageAnon(page)) {
+		rcu_read_lock();
+		rcu_locked = 1;
+	}
 
 	/*
-	 * Establish migration ptes or remove ptes
+	 * Corner case handling:
+	 * 1. When a new swap-cache page is read in, it is added to the LRU
+	 * and treated as swapcache but it has no rmap yet.
+	 * Calling try_to_unmap() against a page->mapping==NULL page will
+	 * trigger a BUG. So handle it here.
+	 * 2. An orphaned page (see truncate_complete_page) might have
+	 * fs-private metadata. The page can be picked up due to memory
+	 * offlining. Everywhere else except page reclaim, the page is
+	 * invisible to the vm, so the page cannot be migrated. So try to
+	 * free the metadata, so the page can be freed.
 	 */
+	if (!page->mapping) {
+		if (!PageAnon(page) && PagePrivate(page)) {
+			/*
+			 * Go direct to try_to_free_buffers() here because
+			 * a) that's what try_to_release_page() would do anyway
+			 * b) we may be under rcu_read_lock() here, so we can't
+			 *    use GFP_KERNEL which is what try_to_release_page()
+			 *    needs to be effective.
+			 */
+			try_to_free_buffers(page);
+		}
+		goto rcu_unlock;
+	}
+
+	charge = mem_cgroup_prepare_migration(page);
+	/* Establish migration ptes or remove ptes */
 	try_to_unmap(page, 1);
+
 	if (!page_mapped(page))
 		rc = move_to_new_page(newpage, page);
 
-	if (rc)
+	if (rc) {
 		remove_migration_ptes(page, page);
+		if (charge)
+			mem_cgroup_end_migration(page);
+	} else if (charge)
+		mem_cgroup_end_migration(newpage);
+rcu_unlock:
+	if (rcu_locked)
+		rcu_read_unlock();
 
 unlock:
+
 	unlock_page(page);
 
 	if (rc != -EAGAIN) {
@@ -662,7 +741,7 @@ move_newpage:
  * The function returns after 10 attempts or if no pages
  * are movable anymore because to has become empty
  * or no retryable pages exist anymore. All pages will be
- * retruned to the LRU or freed.
+ * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
@@ -742,8 +821,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 	*result = &pm->status;
 
 	return alloc_pages_node(pm->node,
-				GFP_HIGHUSER | __GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY,
-				0);
+				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
 }
 
 /*
@@ -776,7 +854,7 @@ static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
 
 		err = -EFAULT;
 		vma = find_vma(mm, pp->addr);
-		if (!vma)
+		if (!vma || !vma_migratable(vma))
 			goto set_status;
 
 		page = follow_page(vma, pp->addr, FOLL_GET);
@@ -882,7 +960,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 	/* Find the mm_struct */
 	read_lock(&tasklist_lock);
-	task = pid ? find_task_by_pid(pid) : current;
+	task = pid ? find_task_by_vpid(pid) : current;
 	if (!task) {
 		read_unlock(&tasklist_lock);
 		return -ESRCH;
 	}
@@ -930,7 +1008,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 	 * array. Return various errors if the user did something wrong.
 	 */
 	for (i = 0; i < nr_pages; i++) {
-		const void *p;
+		const void __user *p;
 
 		err = -EFAULT;
 		if (get_user(p, pages + i))
@@ -944,7 +1022,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 			goto out;
 
 		err = -ENODEV;
-		if (!node_online(node))
+		if (!node_state(node, N_HIGH_MEMORY))
 			goto out;
 
 		err = -EACCES;
@@ -952,7 +1030,8 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 			goto out;
 
 		pm[i].node = node;
-		}
+		} else
+			pm[i].node = 0; /* anything to not match MAX_NUMNODES */
 	}
 	/* End marker */
 	pm[nr_pages].node = MAX_NUMNODES;
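
The sys_move_pages() hunks at the end are easiest to check from the caller's side: pid 0 means the current task (the find_task_by_vpid() branch), each requested node must now be in N_HIGH_MEMORY rather than merely online, and status[] reports a per-page result. A minimal userspace sketch against libnuma's move_pages(2) wrapper (illustrative only; build with -lnuma on a NUMA-enabled kernel):

	#include <numaif.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(void)
	{
		long pagesize = sysconf(_SC_PAGESIZE);
		void *pages[1];
		int nodes[1] = { 0 };	/* requested target node per page */
		int status[1];

		/* One touched, page-aligned page to migrate. */
		if (posix_memalign(&pages[0], pagesize, pagesize))
			return 1;
		*(char *)pages[0] = 1;

		/* pid 0 == current process, matching the kernel path above. */
		if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0)
			perror("move_pages");
		else
			printf("page is now on node %d\n", status[0]);
		return 0;
	}

On success status[0] holds the node the page ended up on; a negative entry is a per-page errno, which is how the -ENODEV and -EACCES checks patched above surface to userspace.
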