X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fmigrate.c;h=e9b161bde95b4c382092a1c94a8fcfbd4bcf58f0;hb=03c6d130f690dba46387480de80acf458a6fd14c;hp=033a12f4c9499657aa07b10860484757291cc0d0;hpb=742755a1d8ce2b548428f7aacf1758b4bba50080;p=safe%2Fjmp%2Flinux-2.6

diff --git a/mm/migrate.c b/mm/migrate.c
index 033a12f..e9b161b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -27,6 +27,7 @@
 #include <linux/writeback.h>
 #include <linux/mempolicy.h>
 #include <linux/vmalloc.h>
+#include <linux/security.h>
 
 #include "internal.h"
 
@@ -293,7 +294,7 @@ out:
 static int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page)
 {
-	struct page **radix_pointer;
+	void **pslot;
 
 	if (!mapping) {
 		/* Anonymous page */
@@ -304,12 +305,11 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 
 	write_lock_irq(&mapping->tree_lock);
 
-	radix_pointer = (struct page **)radix_tree_lookup_slot(
-						&mapping->page_tree,
-						page_index(page));
+	pslot = radix_tree_lookup_slot(&mapping->page_tree,
+					page_index(page));
 
 	if (page_count(page) != 2 + !!PagePrivate(page) ||
-			*radix_pointer != page) {
+			(struct page *)radix_tree_deref_slot(pslot) != page) {
 		write_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
@@ -317,7 +317,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	/*
 	 * Now we know that no one else is looking at the page.
 	 */
-	get_page(newpage);
+	get_page(newpage);	/* add cache reference */
 #ifdef CONFIG_SWAP
 	if (PageSwapCache(page)) {
 		SetPageSwapCache(newpage);
@@ -325,8 +325,14 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	}
 #endif
 
-	*radix_pointer = newpage;
+	radix_tree_replace_slot(pslot, newpage);
+
+	/*
+	 * Drop cache reference from old page.
+	 * We know this isn't the last reference.
+	 */
 	__put_page(page);
+
 	write_unlock_irq(&mapping->tree_lock);
 
 	return 0;
@@ -408,6 +414,7 @@ int migrate_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(migrate_page);
 
+#ifdef CONFIG_BLOCK
 /*
  * Migration function for pages with buffers. This function can only be used
  * if the underlying filesystem guarantees that no other references to "page"
@@ -465,6 +472,7 @@ int buffer_migrate_page(struct address_space *mapping,
 	return 0;
 }
 EXPORT_SYMBOL(buffer_migrate_page);
+#endif
 
 /*
  * Writeback a page to clean the dirty state
@@ -524,7 +532,7 @@ static int fallback_migrate_page(struct address_space *mapping,
 	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
 	 */
-	if (page_has_buffers(page) &&
+	if (PagePrivate(page) &&
 	    !try_to_release_page(page, GFP_KERNEL))
 		return -EAGAIN;
 
@@ -615,15 +623,13 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	/*
 	 * Establish migration ptes or remove ptes
 	 */
-	if (try_to_unmap(page, 1) != SWAP_FAIL) {
-		if (!page_mapped(page))
-			rc = move_to_new_page(newpage, page);
-	} else
-		/* A vma has VM_LOCKED set -> permanent failure */
-		rc = -EPERM;
+	try_to_unmap(page, 1);
+	if (!page_mapped(page))
+		rc = move_to_new_page(newpage, page);
 
 	if (rc)
 		remove_migration_ptes(page, page);
+
 unlock:
 	unlock_page(page);
 
@@ -742,7 +748,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 
 	*result = &pm->status;
 
-	return alloc_pages_node(pm->node, GFP_HIGHUSER, 0);
+	return alloc_pages_node(pm->node, GFP_HIGHUSER | GFP_THISNODE, 0);
 }
 
 /*
@@ -905,6 +911,11 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 		goto out2;
 	}
 
+	err = security_task_movememory(task);
+	if (err)
+		goto out2;
+
+
 	task_nodes = cpuset_mems_allowed(task);
 
 	/* Limit nr_pages so that the multiplication may not overflow */
@@ -946,7 +957,8 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 				goto out;
 
 			pm[i].node = node;
-		}
+		} else
+			pm[i].node = 0; /* anything to not match MAX_NUMNODES */
 	}
 	/* End marker */
 	pm[nr_pages].node = MAX_NUMNODES;
@@ -970,3 +982,23 @@ out2:
 }
 #endif
 
+/*
+ * Call migration functions in the vma_ops that may prepare
+ * memory in a vm for migration. migration functions may perform
+ * the migration for vmas that do not have an underlying page struct.
+ */
+int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
+	const nodemask_t *from, unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	int err = 0;
+
+	for(vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
+		if (vma->vm_ops && vma->vm_ops->migrate) {
+			err = vma->vm_ops->migrate(vma, to, from, flags);
+			if (err)
+				break;
+		}
+	}
+	return err;
+}
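
Notes (not part of the patch):

The migrate_page_move_mapping() hunks replace the cached raw struct page **
into the radix tree with the slot API: radix_tree_lookup_slot() finds the
slot, radix_tree_deref_slot() reads it, and radix_tree_replace_slot()
installs the new page, which stays correct with the RCU-aware radix tree.
The pattern in isolation, as a minimal sketch using the same calls as the
hunks above ('mapping', 'index', 'old' and 'new' stand in for the page-cache
tree, the page index and the two page structs; mapping->tree_lock is held):

	void **slot;

	/* tree_lock is held, so the slot cannot be freed under us. */
	slot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if ((struct page *)radix_tree_deref_slot(slot) != old)
		return -EAGAIN;			/* raced with someone else */
	radix_tree_replace_slot(slot, new);	/* publish the new page */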
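The new migrate_vmas() walks every VMA in the mm and invokes the
vm_ops->migrate method where one is provided, so mappings without an
underlying page struct (device memory, for instance) can take part in
migration. A minimal sketch of such a hook follows; example_vma_migrate
and example_vm_ops are hypothetical names, and the parameter order mirrors
the call site in migrate_vmas() above (the hook sits in the NUMA section
of vm_operations_struct):

	static int example_vma_migrate(struct vm_area_struct *vma,
			const nodemask_t *to, const nodemask_t *from,
			unsigned long flags)
	{
		/*
		 * Re-home whatever backs this VMA onto the nodes in 'to'.
		 * Return 0 on success or a negative errno; migrate_vmas()
		 * stops at the first failure.
		 */
		return 0;
	}

	static struct vm_operations_struct example_vm_ops = {
		.migrate	= example_vma_migrate,
	};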
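The sys_move_pages() hunks add a security_task_movememory() check, make
new_page_node() allocate with GFP_HIGHUSER | GFP_THISNODE so the new page
really comes from the requested node, and initialize pm[i].node even when
no target nodes are passed, so it cannot be mistaken for the MAX_NUMNODES
end marker. A minimal user-space sketch that exercises this syscall through
libnuma's move_pages() wrapper (assumes numaif.h and a NUMA-enabled kernel;
link with -lnuma):

	#include <numaif.h>	/* move_pages(), MPOL_MF_MOVE */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		void *pages[1];
		int nodes[1] = { 0 };	/* target node for each page */
		int status[1];

		/* One touched, page-aligned page to migrate. */
		if (posix_memalign(&pages[0], psz, psz))
			return 1;
		memset(pages[0], 0, psz);

		/*
		 * pid 0 means the calling task; MPOL_MF_MOVE moves only
		 * pages mapped by this process alone.
		 */
		if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0) {
			perror("move_pages");
			return 1;
		}

		/* status[0] is the node the page now sits on, or a -errno. */
		printf("page now on node %d\n", status[0]);
		return 0;
	}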