X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fpage_alloc.c;h=53242344a774f1c4e2db14a7bc12477523a9d67c;hb=15c8b6c1aaaf1c4edd67e2f02e4d8e1bd1a51c0d;hp=f9b5d6d5f4d6cf7643d1c6f39b1d112e8bd18c33;hpb=4037d452202e34214e8a939fa5621b2b3bbb45b7;p=safe%2Fjmp%2Flinux-2.6 diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f9b5d6d..5324234 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -27,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -41,24 +43,39 @@ #include #include #include +#include +#include +#include #include #include #include "internal.h" /* - * MCD - HACK: Find somewhere to initialize this EARLY, or make this - * initializer cleaner + * Array of node states. */ -nodemask_t node_online_map __read_mostly = { { [0] = 1UL } }; -EXPORT_SYMBOL(node_online_map); -nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL; -EXPORT_SYMBOL(node_possible_map); +nodemask_t node_states[NR_NODE_STATES] __read_mostly = { + [N_POSSIBLE] = NODE_MASK_ALL, + [N_ONLINE] = { { [0] = 1UL } }, +#ifndef CONFIG_NUMA + [N_NORMAL_MEMORY] = { { [0] = 1UL } }, +#ifdef CONFIG_HIGHMEM + [N_HIGH_MEMORY] = { { [0] = 1UL } }, +#endif + [N_CPU] = { { [0] = 1UL } }, +#endif /* NUMA */ +}; +EXPORT_SYMBOL(node_states); + unsigned long totalram_pages __read_mostly; unsigned long totalreserve_pages __read_mostly; long nr_swap_pages; int percpu_pagelist_fraction; +#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE +int pageblock_order __read_mostly; +#endif + static void __free_pages_ok(struct page *page, unsigned int order); /* @@ -80,8 +97,9 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, #endif #ifdef CONFIG_HIGHMEM - 32 + 32, #endif + 32, }; EXPORT_SYMBOL(totalram_pages); @@ -95,8 +113,9 @@ static char * const zone_names[MAX_NR_ZONES] = { #endif "Normal", #ifdef CONFIG_HIGHMEM - "HighMem" + "HighMem", #endif + "Movable", }; int min_free_kbytes = 1024; @@ -107,7 +126,7 @@ static unsigned long __meminitdata dma_reserve; #ifdef CONFIG_ARCH_POPULATES_NODE_MAP /* - * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct + * MAX_ACTIVE_REGIONS determines the maximum number of distinct * ranges of memory (RAM) that may be registered with add_active_range(). 
* Ranges passed to add_active_range() will be merged if possible * so the number of times add_active_range() can be called is @@ -126,16 +145,36 @@ static unsigned long __meminitdata dma_reserve; #endif #endif - struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; - int __meminitdata nr_nodemap_entries; - unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; - unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; + static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; + static int __meminitdata nr_nodemap_entries; + static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; + static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE - unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES]; - unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES]; + static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES]; + static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES]; #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ + unsigned long __initdata required_kernelcore; + static unsigned long __initdata required_movablecore; + unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; + + /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ + int movable_zone; + EXPORT_SYMBOL(movable_zone); #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ +#if MAX_NUMNODES > 1 +int nr_node_ids __read_mostly = MAX_NUMNODES; +EXPORT_SYMBOL(nr_node_ids); +#endif + +int page_group_by_mobility_disabled __read_mostly; + +static void set_pageblock_migratetype(struct page *page, int migratetype) +{ + set_pageblock_flags_group(page, (unsigned long)migratetype, + PB_migrate, PB_migrate_end); +} + #ifdef CONFIG_DEBUG_VM static int page_outside_zone_boundaries(struct zone *zone, struct page *page) { @@ -184,24 +223,21 @@ static inline int bad_range(struct zone *zone, struct page *page) static void bad_page(struct page *page) { - printk(KERN_EMERG "Bad page state in process '%s'\n" - KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" - KERN_EMERG "Trying to fix it up, but a reboot is needed\n" - KERN_EMERG "Backtrace:\n", + void *pc = page_get_page_cgroup(page); + + printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG + "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n", current->comm, page, (int)(2*sizeof(unsigned long)), (unsigned long)page->flags, page->mapping, page_mapcount(page), page_count(page)); + if (pc) { + printk(KERN_EMERG "cgroup:%p\n", pc); + page_reset_bad_cgroup(page); + } + printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n" + KERN_EMERG "Backtrace:\n"); dump_stack(); - page->flags &= ~(1 << PG_lru | - 1 << PG_private | - 1 << PG_locked | - 1 << PG_active | - 1 << PG_dirty | - 1 << PG_reclaim | - 1 << PG_slab | - 1 << PG_swapcache | - 1 << PG_writeback | - 1 << PG_buddy ); + page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD; set_page_count(page, 0); reset_page_mapcount(page); page->mapping = NULL; @@ -269,7 +305,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) { int i; - VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM); /* * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO * and __GFP_HIGHMEM from hard or soft interrupt context. 
@@ -279,16 +314,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) clear_highpage(page + i); } -/* - * function for dealing with page's order in buddy system. - * zone->lock is already acquired when we use these. - * So, we don't need atomic page->flags operations here. - */ -static inline unsigned long page_order(struct page *page) -{ - return page_private(page); -} - static inline void set_page_order(struct page *page, int order) { set_page_private(page, order); @@ -390,6 +415,7 @@ static inline void __free_one_page(struct page *page, { unsigned long page_idx; int order_size = 1 << order; + int migratetype = get_pageblock_migratetype(page); if (unlikely(PageCompound(page))) destroy_compound_page(page, order); @@ -402,7 +428,6 @@ static inline void __free_one_page(struct page *page, __mod_zone_page_state(zone, NR_FREE_PAGES, order_size); while (order < MAX_ORDER-1) { unsigned long combined_idx; - struct free_area *area; struct page *buddy; buddy = __page_find_buddy(page, page_idx, order); @@ -410,8 +435,7 @@ static inline void __free_one_page(struct page *page, break; /* Move the buddy up one level. */ list_del(&buddy->lru); - area = zone->free_area + order; - area->nr_free--; + zone->free_area[order].nr_free--; rmv_page_order(buddy); combined_idx = __find_combined_index(page_idx, order); page = page + (combined_idx - page_idx); @@ -419,7 +443,8 @@ static inline void __free_one_page(struct page *page, order++; } set_page_order(page, order); - list_add(&page->lru, &zone->free_area[order].free_list); + list_add(&page->lru, + &zone->free_area[order].free_list[migratetype]); zone->free_area[order].nr_free++; } @@ -427,23 +452,9 @@ static inline int free_pages_check(struct page *page) { if (unlikely(page_mapcount(page) | (page->mapping != NULL) | + (page_get_page_cgroup(page) != NULL) | (page_count(page) != 0) | - (page->flags & ( - 1 << PG_lru | - 1 << PG_private | - 1 << PG_locked | - 1 << PG_active | - 1 << PG_slab | - 1 << PG_swapcache | - 1 << PG_writeback | - 1 << PG_reserved | - 1 << PG_buddy )))) - bad_page(page); - /* - * PageReclaim == PageTail. It is only an error - * for PageReclaim to be set if PageCompound is clear. 
- */ - if (unlikely(!PageCompound(page) && PageReclaim(page))) + (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) bad_page(page); if (PageDirty(page)) __ClearPageDirty(page); @@ -470,7 +481,7 @@ static void free_pages_bulk(struct zone *zone, int count, struct list_head *list, int order) { spin_lock(&zone->lock); - zone->all_unreclaimable = 0; + zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); zone->pages_scanned = 0; while (count--) { struct page *page; @@ -487,7 +498,7 @@ static void free_pages_bulk(struct zone *zone, int count, static void free_one_page(struct zone *zone, struct page *page, int order) { spin_lock(&zone->lock); - zone->all_unreclaimable = 0; + zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE); zone->pages_scanned = 0; __free_one_page(page, zone, order); spin_unlock(&zone->lock); @@ -504,8 +515,11 @@ static void __free_pages_ok(struct page *page, unsigned int order) if (reserved) return; - if (!PageHighMem(page)) + if (!PageHighMem(page)) { debug_check_no_locks_freed(page_address(page),PAGE_SIZE<>= 1; VM_BUG_ON(bad_range(zone, &page[size])); - list_add(&page[size].lru, &area->free_list); + list_add(&page[size].lru, &area->free_list[migratetype]); area->nr_free++; set_page_order(&page[size], high); } @@ -581,19 +596,9 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) { if (unlikely(page_mapcount(page) | (page->mapping != NULL) | + (page_get_page_cgroup(page) != NULL) | (page_count(page) != 0) | - (page->flags & ( - 1 << PG_lru | - 1 << PG_private | - 1 << PG_locked | - 1 << PG_active | - 1 << PG_dirty | - 1 << PG_reclaim | - 1 << PG_slab | - 1 << PG_swapcache | - 1 << PG_writeback | - 1 << PG_reserved | - 1 << PG_buddy )))) + (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) bad_page(page); /* @@ -603,7 +608,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) if (PageReserved(page)) return 1; - page->flags &= ~(1 << PG_uptodate | 1 << PG_error | + page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim | 1 << PG_referenced | 1 << PG_arch_1 | 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk); set_page_private(page, 0); @@ -621,74 +626,231 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) return 0; } -/* - * Do the hard work of removing an element from the buddy allocator. - * Call me with the zone->lock already held. 
+/* + * Go through the free lists for the given migratetype and remove + * the smallest available page from the freelists */ -static struct page *__rmqueue(struct zone *zone, unsigned int order) +static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, + int migratetype) { - struct free_area * area; unsigned int current_order; + struct free_area * area; struct page *page; + /* Find a page of the appropriate size in the preferred list */ for (current_order = order; current_order < MAX_ORDER; ++current_order) { - area = zone->free_area + current_order; - if (list_empty(&area->free_list)) + area = &(zone->free_area[current_order]); + if (list_empty(&area->free_list[migratetype])) continue; - page = list_entry(area->free_list.next, struct page, lru); + page = list_entry(area->free_list[migratetype].next, + struct page, lru); list_del(&page->lru); rmv_page_order(page); area->nr_free--; __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order)); - expand(zone, page, order, current_order, area); + expand(zone, page, order, current_order, area, migratetype); return page; } return NULL; } + +/* + * This array describes the order lists are fallen back to when + * the free lists for the desirable migrate type are depleted + */ +static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = { + [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, + [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, + [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, + [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */ +}; + +/* + * Move the free pages in a range to the free lists of the requested type. + * Note that start_page and end_pages are not aligned on a pageblock + * boundary. If alignment is required, use move_freepages_block() + */ +int move_freepages(struct zone *zone, + struct page *start_page, struct page *end_page, + int migratetype) +{ + struct page *page; + unsigned long order; + int pages_moved = 0; + +#ifndef CONFIG_HOLES_IN_ZONE + /* + * page_zone is not safe to call in this context when + * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant + * anyway as we check zone boundaries in move_freepages_block(). 
+ * Remove at a later date when no bug reports exist related to + * grouping pages by mobility + */ + BUG_ON(page_zone(start_page) != page_zone(end_page)); +#endif + + for (page = start_page; page <= end_page;) { + if (!pfn_valid_within(page_to_pfn(page))) { + page++; + continue; + } + + if (!PageBuddy(page)) { + page++; + continue; + } + + order = page_order(page); + list_del(&page->lru); + list_add(&page->lru, + &zone->free_area[order].free_list[migratetype]); + page += 1 << order; + pages_moved += 1 << order; + } + + return pages_moved; +} + +int move_freepages_block(struct zone *zone, struct page *page, int migratetype) +{ + unsigned long start_pfn, end_pfn; + struct page *start_page, *end_page; + + start_pfn = page_to_pfn(page); + start_pfn = start_pfn & ~(pageblock_nr_pages-1); + start_page = pfn_to_page(start_pfn); + end_page = start_page + pageblock_nr_pages - 1; + end_pfn = start_pfn + pageblock_nr_pages - 1; + + /* Do not cross zone boundaries */ + if (start_pfn < zone->zone_start_pfn) + start_page = page; + if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages) + return 0; + + return move_freepages(zone, start_page, end_page, migratetype); +} + +/* Remove an element from the buddy allocator from the fallback list */ +static struct page *__rmqueue_fallback(struct zone *zone, int order, + int start_migratetype) +{ + struct free_area * area; + int current_order; + struct page *page; + int migratetype, i; + + /* Find the largest possible block of pages in the other list */ + for (current_order = MAX_ORDER-1; current_order >= order; + --current_order) { + for (i = 0; i < MIGRATE_TYPES - 1; i++) { + migratetype = fallbacks[start_migratetype][i]; + + /* MIGRATE_RESERVE handled later if necessary */ + if (migratetype == MIGRATE_RESERVE) + continue; + + area = &(zone->free_area[current_order]); + if (list_empty(&area->free_list[migratetype])) + continue; + + page = list_entry(area->free_list[migratetype].next, + struct page, lru); + area->nr_free--; + + /* + * If breaking a large block of pages, move all free + * pages to the preferred allocation list. If falling + * back for a reclaimable kernel allocation, be more + * agressive about taking ownership of free pages + */ + if (unlikely(current_order >= (pageblock_order >> 1)) || + start_migratetype == MIGRATE_RECLAIMABLE) { + unsigned long pages; + pages = move_freepages_block(zone, page, + start_migratetype); + + /* Claim the whole block if over half of it is free */ + if (pages >= (1 << (pageblock_order-1))) + set_pageblock_migratetype(page, + start_migratetype); + + migratetype = start_migratetype; + } + + /* Remove the page from the freelists */ + list_del(&page->lru); + rmv_page_order(page); + __mod_zone_page_state(zone, NR_FREE_PAGES, + -(1UL << order)); + + if (current_order == pageblock_order) + set_pageblock_migratetype(page, + start_migratetype); + + expand(zone, page, order, current_order, area, migratetype); + return page; + } + } + + /* Use MIGRATE_RESERVE rather than fail an allocation */ + return __rmqueue_smallest(zone, order, MIGRATE_RESERVE); +} + +/* + * Do the hard work of removing an element from the buddy allocator. + * Call me with the zone->lock already held. 
+ */ +static struct page *__rmqueue(struct zone *zone, unsigned int order, + int migratetype) +{ + struct page *page; + + page = __rmqueue_smallest(zone, order, migratetype); + + if (unlikely(!page)) + page = __rmqueue_fallback(zone, order, migratetype); + + return page; +} + /* * Obtain a specified number of elements from the buddy allocator, all under * a single hold of the lock, for efficiency. Add them to the supplied list. * Returns the number of new pages which were placed at *list. */ static int rmqueue_bulk(struct zone *zone, unsigned int order, - unsigned long count, struct list_head *list) + unsigned long count, struct list_head *list, + int migratetype) { int i; spin_lock(&zone->lock); for (i = 0; i < count; ++i) { - struct page *page = __rmqueue(zone, order); + struct page *page = __rmqueue(zone, order, migratetype); if (unlikely(page == NULL)) break; - list_add_tail(&page->lru, list); + + /* + * Split buddy pages returned by expand() are received here + * in physical page order. The page is added to the callers and + * list and the list head then moves forward. From the callers + * perspective, the linked list is ordered by page number in + * some conditions. This is useful for IO devices that can + * merge IO requests if the physical pages are ordered + * properly. + */ + list_add(&page->lru, list); + set_page_private(page, migratetype); + list = &page->lru; } spin_unlock(&zone->lock); return i; } -#if MAX_NUMNODES > 1 -int nr_node_ids __read_mostly = MAX_NUMNODES; -EXPORT_SYMBOL(nr_node_ids); - -/* - * Figure out the number of possible node ids. - */ -static void __init setup_nr_node_ids(void) -{ - unsigned int node; - unsigned int highest = 0; - - for_each_node_mask(node, node_possible_map) - highest = node; - nr_node_ids = highest + 1; -} -#else -static void __init setup_nr_node_ids(void) {} -#endif - #ifdef CONFIG_NUMA /* * Called from the vmstat counter updater to drain pagesets of this @@ -714,38 +876,58 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) } #endif -static void __drain_pages(unsigned int cpu) +/* + * Drain pages of the indicated processor. + * + * The processor must either be the current processor and the + * thread pinned to the current processor or a processor that + * is not online. + */ +static void drain_pages(unsigned int cpu) { unsigned long flags; struct zone *zone; - int i; for_each_zone(zone) { struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; if (!populated_zone(zone)) continue; pset = zone_pcp(zone, cpu); - for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { - struct per_cpu_pages *pcp; - - pcp = &pset->pcp[i]; - local_irq_save(flags); - free_pages_bulk(zone, pcp->count, &pcp->list, 0); - pcp->count = 0; - local_irq_restore(flags); - } + + pcp = &pset->pcp; + local_irq_save(flags); + free_pages_bulk(zone, pcp->count, &pcp->list, 0); + pcp->count = 0; + local_irq_restore(flags); } } -#ifdef CONFIG_PM +/* + * Spill all of this CPU's per-cpu pages back into the buddy allocator. 
+ */ +void drain_local_pages(void *arg) +{ + drain_pages(smp_processor_id()); +} + +/* + * Spill all the per-cpu pages from all CPUs back into the buddy allocator + */ +void drain_all_pages(void) +{ + on_each_cpu(drain_local_pages, NULL, 1); +} + +#ifdef CONFIG_HIBERNATION void mark_free_pages(struct zone *zone) { unsigned long pfn, max_zone_pfn; unsigned long flags; - int order; + int order, t; struct list_head *curr; if (!zone->spanned_pages) @@ -762,35 +944,23 @@ void mark_free_pages(struct zone *zone) swsusp_unset_page_free(page); } - for (order = MAX_ORDER - 1; order >= 0; --order) - list_for_each(curr, &zone->free_area[order].free_list) { + for_each_migratetype_order(order, t) { + list_for_each(curr, &zone->free_area[order].free_list[t]) { unsigned long i; pfn = page_to_pfn(list_entry(curr, struct page, lru)); for (i = 0; i < (1UL << order); i++) swsusp_set_page_free(pfn_to_page(pfn + i)); } - + } spin_unlock_irqrestore(&zone->lock, flags); } - -/* - * Spill all of this CPU's per-cpu pages back into the buddy allocator. - */ -void drain_local_pages(void) -{ - unsigned long flags; - - local_irq_save(flags); - __drain_pages(smp_processor_id()); - local_irq_restore(flags); -} #endif /* CONFIG_PM */ /* * Free a 0-order page */ -static void fastcall free_hot_cold_page(struct page *page, int cold) +static void free_hot_cold_page(struct page *page, int cold) { struct zone *zone = page_zone(page); struct per_cpu_pages *pcp; @@ -801,15 +971,21 @@ static void fastcall free_hot_cold_page(struct page *page, int cold) if (free_pages_check(page)) return; - if (!PageHighMem(page)) + if (!PageHighMem(page)) { debug_check_no_locks_freed(page_address(page), PAGE_SIZE); + debug_check_no_obj_freed(page_address(page), PAGE_SIZE); + } arch_free_page(page, 0); kernel_map_pages(page, 1, 0); - pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; + pcp = &zone_pcp(zone, get_cpu())->pcp; local_irq_save(flags); __count_vm_event(PGFREE); - list_add(&page->lru, &pcp->list); + if (cold) + list_add_tail(&page->lru, &pcp->list); + else + list_add(&page->lru, &pcp->list); + set_page_private(page, get_pageblock_migratetype(page)); pcp->count++; if (pcp->count >= pcp->high) { free_pages_bulk(zone, pcp->batch, &pcp->list, 0); @@ -819,12 +995,12 @@ static void fastcall free_hot_cold_page(struct page *page, int cold) put_cpu(); } -void fastcall free_hot_page(struct page *page) +void free_hot_page(struct page *page) { free_hot_cold_page(page, 0); } -void fastcall free_cold_page(struct page *page) +void free_cold_page(struct page *page) { free_hot_cold_page(page, 1); } @@ -852,40 +1028,59 @@ void split_page(struct page *page, unsigned int order) * we cheat by calling it from here, in the order > 0 path. Saves a branch * or two. 
*/ -static struct page *buffered_rmqueue(struct zonelist *zonelist, +static struct page *buffered_rmqueue(struct zone *preferred_zone, struct zone *zone, int order, gfp_t gfp_flags) { unsigned long flags; struct page *page; int cold = !!(gfp_flags & __GFP_COLD); int cpu; + int migratetype = allocflags_to_migratetype(gfp_flags); again: cpu = get_cpu(); if (likely(order == 0)) { struct per_cpu_pages *pcp; - pcp = &zone_pcp(zone, cpu)->pcp[cold]; + pcp = &zone_pcp(zone, cpu)->pcp; local_irq_save(flags); if (!pcp->count) { pcp->count = rmqueue_bulk(zone, 0, - pcp->batch, &pcp->list); + pcp->batch, &pcp->list, migratetype); if (unlikely(!pcp->count)) goto failed; } - page = list_entry(pcp->list.next, struct page, lru); + + /* Find a page of the appropriate migrate type */ + if (cold) { + list_for_each_entry_reverse(page, &pcp->list, lru) + if (page_private(page) == migratetype) + break; + } else { + list_for_each_entry(page, &pcp->list, lru) + if (page_private(page) == migratetype) + break; + } + + /* Allocate more to the pcp list if necessary */ + if (unlikely(&page->lru == &pcp->list)) { + pcp->count += rmqueue_bulk(zone, 0, + pcp->batch, &pcp->list, migratetype); + page = list_entry(pcp->list.next, struct page, lru); + } + list_del(&page->lru); pcp->count--; } else { spin_lock_irqsave(&zone->lock, flags); - page = __rmqueue(zone, order); + page = __rmqueue(zone, order, migratetype); spin_unlock(&zone->lock); if (!page) goto failed; } __count_zone_vm_events(PGALLOC, zone, 1 << order); - zone_statistics(zonelist, zone); + zone_statistics(preferred_zone, zone); local_irq_restore(flags); put_cpu(); @@ -915,11 +1110,13 @@ static struct fail_page_alloc_attr { u32 ignore_gfp_highmem; u32 ignore_gfp_wait; + u32 min_order; #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS struct dentry *ignore_gfp_highmem_file; struct dentry *ignore_gfp_wait_file; + struct dentry *min_order_file; #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ @@ -927,6 +1124,7 @@ static struct fail_page_alloc_attr { .attr = FAULT_ATTR_INITIALIZER, .ignore_gfp_wait = 1, .ignore_gfp_highmem = 1, + .min_order = 1, }; static int __init setup_fail_page_alloc(char *str) @@ -937,6 +1135,8 @@ __setup("fail_page_alloc=", setup_fail_page_alloc); static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) { + if (order < fail_page_alloc.min_order) + return 0; if (gfp_mask & __GFP_NOFAIL) return 0; if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) @@ -968,12 +1168,17 @@ static int __init fail_page_alloc_debugfs(void) fail_page_alloc.ignore_gfp_highmem_file = debugfs_create_bool("ignore-gfp-highmem", mode, dir, &fail_page_alloc.ignore_gfp_highmem); + fail_page_alloc.min_order_file = + debugfs_create_u32("min-order", mode, dir, + &fail_page_alloc.min_order); if (!fail_page_alloc.ignore_gfp_wait_file || - !fail_page_alloc.ignore_gfp_highmem_file) { + !fail_page_alloc.ignore_gfp_highmem_file || + !fail_page_alloc.min_order_file) { err = -ENOMEM; debugfs_remove(fail_page_alloc.ignore_gfp_wait_file); debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file); + debugfs_remove(fail_page_alloc.min_order_file); cleanup_fault_attr_dentries(&fail_page_alloc.attr); } @@ -1031,11 +1236,11 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, * skip over zones that are not allowed by the cpuset, or that have * been recently (in last second) found to be nearly full. See further * comments in mmzone.h. Reduces cache footprint of zonelist scans - * that have to skip over alot of full or unallowed zones. 
+ * that have to skip over a lot of full or unallowed zones. * * If the zonelist cache is present in the passed in zonelist, then * returns a pointer to the allowed node mask (either the current - * tasks mems_allowed, or node_online_map.) + * tasks mems_allowed, or node_states[N_HIGH_MEMORY].) * * If the zonelist cache is not available for this zonelist, does * nothing and returns NULL. @@ -1057,14 +1262,14 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) if (!zlc) return NULL; - if (jiffies - zlc->last_full_zap > 1 * HZ) { + if (time_after(jiffies, zlc->last_full_zap + HZ)) { bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); zlc->last_full_zap = jiffies; } allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ? &cpuset_current_mems_allowed : - &node_online_map; + &node_states[N_HIGH_MEMORY]; return allowednodes; } @@ -1090,7 +1295,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) * We are low on memory in the second scan, and should leave no stone * unturned looking for a free page. */ -static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, +static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, nodemask_t *allowednodes) { struct zonelist_cache *zlc; /* cached zonelist speedup info */ @@ -1101,7 +1306,7 @@ static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, if (!zlc) return 1; - i = z - zonelist->zones; + i = z - zonelist->_zonerefs; n = zlc->z_to_n[i]; /* This zone is worth trying if it is allowed but not full */ @@ -1113,7 +1318,7 @@ static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, * zlc->fullzones, so that subsequent attempts to allocate a page * from that zone don't waste time re-examining it. */ -static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) +static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) { struct zonelist_cache *zlc; /* cached zonelist speedup info */ int i; /* index of *z in zonelist zones */ @@ -1122,7 +1327,7 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) if (!zlc) return; - i = z - zonelist->zones; + i = z - zonelist->_zonerefs; set_bit(i, zlc->fullzones); } @@ -1134,13 +1339,13 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) return NULL; } -static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, +static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, nodemask_t *allowednodes) { return 1; } -static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) +static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) { } #endif /* CONFIG_NUMA */ @@ -1150,32 +1355,34 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) * a page. 
*/ static struct page * -get_page_from_freelist(gfp_t gfp_mask, unsigned int order, - struct zonelist *zonelist, int alloc_flags) +get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, + struct zonelist *zonelist, int high_zoneidx, int alloc_flags) { - struct zone **z; + struct zoneref *z; struct page *page = NULL; - int classzone_idx = zone_idx(zonelist->zones[0]); - struct zone *zone; + int classzone_idx; + struct zone *zone, *preferred_zone; nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ int zlc_active = 0; /* set if using zonelist_cache */ int did_zlc_setup = 0; /* just call zlc_setup() one time */ + (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask, + &preferred_zone); + if (!preferred_zone) + return NULL; + + classzone_idx = zone_idx(preferred_zone); + zonelist_scan: /* * Scan zonelist, looking for a zone with enough free. * See also cpuset_zone_allowed() comment in kernel/cpuset.c. */ - z = zonelist->zones; - - do { + for_each_zone_zonelist_nodemask(zone, z, zonelist, + high_zoneidx, nodemask) { if (NUMA_BUILD && zlc_active && !zlc_zone_worth_trying(zonelist, z, allowednodes)) continue; - zone = *z; - if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) && - zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) - break; if ((alloc_flags & ALLOC_CPUSET) && !cpuset_zone_allowed_softwall(zone, gfp_mask)) goto try_next_zone; @@ -1196,7 +1403,7 @@ zonelist_scan: } } - page = buffered_rmqueue(zonelist, zone, order, gfp_mask); + page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask); if (page) break; this_zone_full: @@ -1209,7 +1416,7 @@ try_next_zone: zlc_active = 1; did_zlc_setup = 1; } - } while (*(++z) != NULL); + } if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { /* Disable zlc cache for second zonelist scan */ @@ -1222,18 +1429,21 @@ try_next_zone: /* * This is the 'heart' of the zoned buddy allocator. */ -struct page * fastcall -__alloc_pages(gfp_t gfp_mask, unsigned int order, - struct zonelist *zonelist) +static struct page * +__alloc_pages_internal(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, nodemask_t *nodemask) { const gfp_t wait = gfp_mask & __GFP_WAIT; - struct zone **z; + enum zone_type high_zoneidx = gfp_zone(gfp_mask); + struct zoneref *z; + struct zone *zone; struct page *page; struct reclaim_state reclaim_state; struct task_struct *p = current; int do_retry; int alloc_flags; - int did_some_progress; + unsigned long did_some_progress; + unsigned long pages_reclaimed = 0; might_sleep_if(wait); @@ -1241,15 +1451,18 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, return NULL; restart: - z = zonelist->zones; /* the list of zones suitable for gfp_mask */ + z = zonelist->_zonerefs; /* the list of zones suitable for gfp_mask */ - if (unlikely(*z == NULL)) { - /* Should this ever happen?? 
*/ + if (unlikely(!z->zone)) { + /* + * Happens if we have an empty zonelist as a result of + * GFP_THISNODE being used on a memoryless node + */ return NULL; } - page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, - zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); + page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, + zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET); if (page) goto got_pg; @@ -1264,8 +1477,8 @@ restart: if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) goto nopage; - for (z = zonelist->zones; *z; z++) - wakeup_kswapd(*z, order); + for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) + wakeup_kswapd(zone, order); /* * OK, we're below the kswapd watermark and have kicked background @@ -1293,7 +1506,8 @@ restart: * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. * See also cpuset_zone_allowed() comment in kernel/cpuset.c. */ - page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); + page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, + high_zoneidx, alloc_flags); if (page) goto got_pg; @@ -1305,8 +1519,8 @@ rebalance: if (!(gfp_mask & __GFP_NOMEMALLOC)) { nofail_alloc: /* go through the zonelist yet again, ignoring mins */ - page = get_page_from_freelist(gfp_mask, order, - zonelist, ALLOC_NO_WATERMARKS); + page = get_page_from_freelist(gfp_mask, nodemask, order, + zonelist, high_zoneidx, ALLOC_NO_WATERMARKS); if (page) goto got_pg; if (gfp_mask & __GFP_NOFAIL) { @@ -1329,31 +1543,49 @@ nofail_alloc: reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; - did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); + did_some_progress = try_to_free_pages(zonelist, order, gfp_mask); p->reclaim_state = NULL; p->flags &= ~PF_MEMALLOC; cond_resched(); + if (order != 0) + drain_all_pages(); + if (likely(did_some_progress)) { - page = get_page_from_freelist(gfp_mask, order, - zonelist, alloc_flags); + page = get_page_from_freelist(gfp_mask, nodemask, order, + zonelist, high_zoneidx, alloc_flags); if (page) goto got_pg; } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { + if (!try_set_zone_oom(zonelist, gfp_mask)) { + schedule_timeout_uninterruptible(1); + goto restart; + } + /* * Go through the zonelist yet one more time, keep * very high watermark here, this is only to catch * a parallel oom killing, we must fail if we're still * under heavy pressure. */ - page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, - zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); - if (page) + page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, + order, zonelist, high_zoneidx, + ALLOC_WMARK_HIGH|ALLOC_CPUSET); + if (page) { + clear_zonelist_oom(zonelist, gfp_mask); goto got_pg; + } + + /* The OOM killer will not help higher order allocs so fail */ + if (order > PAGE_ALLOC_COSTLY_ORDER) { + clear_zonelist_oom(zonelist, gfp_mask); + goto nopage; + } out_of_memory(zonelist, gfp_mask, order); + clear_zonelist_oom(zonelist, gfp_mask); goto restart; } @@ -1361,13 +1593,26 @@ nofail_alloc: * Don't let big-order allocations loop unless the caller explicitly * requests that. Wait for some write requests to complete then retry. * - * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order - * <= 3, but that may not be true in other implementations. + * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER + * means __GFP_NOFAIL, but that may not be true in other + * implementations. 
+ * + * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is + * specified, then we retry until we no longer reclaim any pages + * (above), or we've reclaimed an order of pages at least as + * large as the allocation's order. In both cases, if the + * allocation still fails, we stop retrying. */ + pages_reclaimed += did_some_progress; do_retry = 0; if (!(gfp_mask & __GFP_NORETRY)) { - if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) + if (order <= PAGE_ALLOC_COSTLY_ORDER) { do_retry = 1; + } else { + if (gfp_mask & __GFP_REPEAT && + pages_reclaimed < (1 << order)) + do_retry = 1; + } if (gfp_mask & __GFP_NOFAIL) do_retry = 1; } @@ -1388,12 +1633,26 @@ got_pg: return page; } +struct page * +__alloc_pages(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist) +{ + return __alloc_pages_internal(gfp_mask, order, zonelist, NULL); +} + +struct page * +__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, nodemask_t *nodemask) +{ + return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask); +} + EXPORT_SYMBOL(__alloc_pages); /* * Common helper functions. */ -fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) +unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) { struct page * page; page = alloc_pages(gfp_mask, order); @@ -1404,7 +1663,7 @@ fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) EXPORT_SYMBOL(__get_free_pages); -fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) +unsigned long get_zeroed_page(gfp_t gfp_mask) { struct page * page; @@ -1430,7 +1689,7 @@ void __pagevec_free(struct pagevec *pvec) free_hot_cold_page(pvec->pages[i], pvec->cold); } -fastcall void __free_pages(struct page *page, unsigned int order) +void __free_pages(struct page *page, unsigned int order) { if (put_page_testzero(page)) { if (order == 0) @@ -1442,7 +1701,7 @@ fastcall void __free_pages(struct page *page, unsigned int order) EXPORT_SYMBOL(__free_pages); -fastcall void free_pages(unsigned long addr, unsigned int order) +void free_pages(unsigned long addr, unsigned int order) { if (addr != 0) { VM_BUG_ON(!virt_addr_valid((void *)addr)); @@ -1454,15 +1713,15 @@ EXPORT_SYMBOL(free_pages); static unsigned int nr_free_zone_pages(int offset) { + struct zoneref *z; + struct zone *zone; + /* Just pick one node, since fallback list is circular */ - pg_data_t *pgdat = NODE_DATA(numa_node_id()); unsigned int sum = 0; - struct zonelist *zonelist = pgdat->node_zonelists + offset; - struct zone **zonep = zonelist->zones; - struct zone *zone; + struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); - for (zone = *zonep++; zone; zone = *zonep++) { + for_each_zone_zonelist(zone, z, zonelist, offset) { unsigned long size = zone->present_pages; unsigned long high = zone->pages_high; if (size > high) @@ -1479,13 +1738,14 @@ unsigned int nr_free_buffer_pages(void) { return nr_free_zone_pages(gfp_zone(GFP_USER)); } +EXPORT_SYMBOL_GPL(nr_free_buffer_pages); /* * Amount of free RAM allocatable within all zones */ unsigned int nr_free_pagecache_pages(void) { - return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); + return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); } static inline void show_node(struct zone *zone) @@ -1550,12 +1810,9 @@ void show_free_areas(void) pageset = zone_pcp(zone, cpu); - printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d " - "Cold: hi:%5d, btch:%4d usd:%4d\n", - cpu, pageset->pcp[0].high, - pageset->pcp[0].batch, pageset->pcp[0].count, - pageset->pcp[1].high, 
pageset->pcp[1].batch, - pageset->pcp[1].count); + printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n", + cpu, pageset->pcp.high, + pageset->pcp.batch, pageset->pcp.count); } } @@ -1600,7 +1857,7 @@ void show_free_areas(void) K(zone_page_state(zone, NR_INACTIVE)), K(zone->present_pages), zone->pages_scanned, - (zone->all_unreclaimable ? "yes" : "no") + (zone_is_all_unreclaimable(zone) ? "yes" : "no") ); printk("lowmem_reserve[]:"); for (i = 0; i < MAX_NR_ZONES; i++) @@ -1628,16 +1885,24 @@ void show_free_areas(void) printk("= %lukB\n", K(total)); } + printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); + show_swap_cache_info(); } +static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) +{ + zoneref->zone = zone; + zoneref->zone_idx = zone_idx(zone); +} + /* * Builds allocation fallback zone lists. * * Add all populated zones of a node to the zonelist. */ -static int __meminit build_zonelists_node(pg_data_t *pgdat, - struct zonelist *zonelist, int nr_zones, enum zone_type zone_type) +static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, + int nr_zones, enum zone_type zone_type) { struct zone *zone; @@ -1648,7 +1913,8 @@ static int __meminit build_zonelists_node(pg_data_t *pgdat, zone_type--; zone = pgdat->node_zones + zone_type; if (populated_zone(zone)) { - zonelist->zones[nr_zones++] = zone; + zoneref_set_zone(zone, + &zonelist->_zonerefs[nr_zones++]); check_highest_zone(zone_type); } @@ -1656,9 +1922,102 @@ static int __meminit build_zonelists_node(pg_data_t *pgdat, return nr_zones; } + +/* + * zonelist_order: + * 0 = automatic detection of better ordering. + * 1 = order by ([node] distance, -zonetype) + * 2 = order by (-zonetype, [node] distance) + * + * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create + * the same zonelist. So only NUMA can configure this param. + */ +#define ZONELIST_ORDER_DEFAULT 0 +#define ZONELIST_ORDER_NODE 1 +#define ZONELIST_ORDER_ZONE 2 + +/* zonelist order in the kernel. + * set_zonelist_order() will set this to NODE or ZONE. + */ +static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; +static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; + + #ifdef CONFIG_NUMA +/* The value user specified ....changed by config */ +static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; +/* string for sysctl */ +#define NUMA_ZONELIST_ORDER_LEN 16 +char numa_zonelist_order[16] = "default"; + +/* + * interface for configure zonelist ordering. + * command line option "numa_zonelist_order" + * = "[dD]efault - default, automatic configuration. 
+ * = "[nN]ode - order by node locality, then by zone within node + * = "[zZ]one - order by zone, then by locality within zone + */ + +static int __parse_numa_zonelist_order(char *s) +{ + if (*s == 'd' || *s == 'D') { + user_zonelist_order = ZONELIST_ORDER_DEFAULT; + } else if (*s == 'n' || *s == 'N') { + user_zonelist_order = ZONELIST_ORDER_NODE; + } else if (*s == 'z' || *s == 'Z') { + user_zonelist_order = ZONELIST_ORDER_ZONE; + } else { + printk(KERN_WARNING + "Ignoring invalid numa_zonelist_order value: " + "%s\n", s); + return -EINVAL; + } + return 0; +} + +static __init int setup_numa_zonelist_order(char *s) +{ + if (s) + return __parse_numa_zonelist_order(s); + return 0; +} +early_param("numa_zonelist_order", setup_numa_zonelist_order); + +/* + * sysctl handler for numa_zonelist_order + */ +int numa_zonelist_order_handler(ctl_table *table, int write, + struct file *file, void __user *buffer, size_t *length, + loff_t *ppos) +{ + char saved_string[NUMA_ZONELIST_ORDER_LEN]; + int ret; + + if (write) + strncpy(saved_string, (char*)table->data, + NUMA_ZONELIST_ORDER_LEN); + ret = proc_dostring(table, write, file, buffer, length, ppos); + if (ret) + return ret; + if (write) { + int oldval = user_zonelist_order; + if (__parse_numa_zonelist_order((char*)table->data)) { + /* + * bogus value. restore saved string + */ + strncpy((char*)table->data, saved_string, + NUMA_ZONELIST_ORDER_LEN); + user_zonelist_order = oldval; + } else if (oldval != user_zonelist_order) + build_all_zonelists(); + } + return 0; +} + + #define MAX_NODE_LOAD (num_online_nodes()) -static int __meminitdata node_load[MAX_NUMNODES]; +static int node_load[MAX_NUMNODES]; + /** * find_next_best_node - find the next node that should appear in a given node's fallback list * @node: node whose fallback list we're appending @@ -1673,11 +2032,12 @@ static int __meminitdata node_load[MAX_NUMNODES]; * on them otherwise. * It returns -1 if no node is found. */ -static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) +static int find_next_best_node(int node, nodemask_t *used_node_mask) { int n, val; int min_val = INT_MAX; int best_node = -1; + node_to_cpumask_ptr(tmp, 0); /* Use the local node if we haven't already */ if (!node_isset(node, *used_node_mask)) { @@ -1685,8 +2045,7 @@ static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) return node; } - for_each_online_node(n) { - cpumask_t tmp; + for_each_node_state(n, N_HIGH_MEMORY) { /* Don't want a node to appear more than once */ if (node_isset(n, *used_node_mask)) @@ -1699,8 +2058,8 @@ static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) val += (n < node); /* Give preference to headless and unused nodes */ - tmp = node_to_cpumask(n); - if (!cpus_empty(tmp)) + node_to_cpumask_ptr_next(tmp, n); + if (!cpus_empty(*tmp)) val += PENALTY_FOR_NODE_WITH_CPUS; /* Slight preference for less loaded node */ @@ -1719,27 +2078,162 @@ static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) return best_node; } -static void __meminit build_zonelists(pg_data_t *pgdat) + +/* + * Build zonelists ordered by node and zones within node. + * This results in maximum locality--normal zone overflows into local + * DMA zone, if any--but risks exhausting DMA zone. 
+ */ +static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) { - int j, node, local_node; - enum zone_type i; - int prev_node, load; + int j; struct zonelist *zonelist; - nodemask_t used_mask; - /* initialize zonelists */ - for (i = 0; i < MAX_NR_ZONES; i++) { - zonelist = pgdat->node_zonelists + i; - zonelist->zones[0] = NULL; - } + zonelist = &pgdat->node_zonelists[0]; + for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) + ; + j = build_zonelists_node(NODE_DATA(node), zonelist, j, + MAX_NR_ZONES - 1); + zonelist->_zonerefs[j].zone = NULL; + zonelist->_zonerefs[j].zone_idx = 0; +} - /* NUMA-aware ordering of nodes */ - local_node = pgdat->node_id; - load = num_online_nodes(); - prev_node = local_node; - nodes_clear(used_mask); - while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { - int distance = node_distance(local_node, node); +/* + * Build gfp_thisnode zonelists + */ +static void build_thisnode_zonelists(pg_data_t *pgdat) +{ + int j; + struct zonelist *zonelist; + + zonelist = &pgdat->node_zonelists[1]; + j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); + zonelist->_zonerefs[j].zone = NULL; + zonelist->_zonerefs[j].zone_idx = 0; +} + +/* + * Build zonelists ordered by zone and nodes within zones. + * This results in conserving DMA zone[s] until all Normal memory is + * exhausted, but results in overflowing to remote node while memory + * may still exist in local DMA zone. + */ +static int node_order[MAX_NUMNODES]; + +static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes) +{ + int pos, j, node; + int zone_type; /* needs to be signed */ + struct zone *z; + struct zonelist *zonelist; + + zonelist = &pgdat->node_zonelists[0]; + pos = 0; + for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) { + for (j = 0; j < nr_nodes; j++) { + node = node_order[j]; + z = &NODE_DATA(node)->node_zones[zone_type]; + if (populated_zone(z)) { + zoneref_set_zone(z, + &zonelist->_zonerefs[pos++]); + check_highest_zone(zone_type); + } + } + } + zonelist->_zonerefs[pos].zone = NULL; + zonelist->_zonerefs[pos].zone_idx = 0; +} + +static int default_zonelist_order(void) +{ + int nid, zone_type; + unsigned long low_kmem_size,total_size; + struct zone *z; + int average_size; + /* + * ZONE_DMA and ZONE_DMA32 can be very small area in the sytem. + * If they are really small and used heavily, the system can fall + * into OOM very easily. + * This function detect ZONE_DMA/DMA32 size and confgigures zone order. + */ + /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */ + low_kmem_size = 0; + total_size = 0; + for_each_online_node(nid) { + for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { + z = &NODE_DATA(nid)->node_zones[zone_type]; + if (populated_zone(z)) { + if (zone_type < ZONE_NORMAL) + low_kmem_size += z->present_pages; + total_size += z->present_pages; + } + } + } + if (!low_kmem_size || /* there are no DMA area. */ + low_kmem_size > total_size/2) /* DMA/DMA32 is big. */ + return ZONELIST_ORDER_NODE; + /* + * look into each node's config. + * If there is a node whose DMA/DMA32 memory is very big area on + * local memory, NODE_ORDER may be suitable. 
+ */ + average_size = total_size / + (nodes_weight(node_states[N_HIGH_MEMORY]) + 1); + for_each_online_node(nid) { + low_kmem_size = 0; + total_size = 0; + for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { + z = &NODE_DATA(nid)->node_zones[zone_type]; + if (populated_zone(z)) { + if (zone_type < ZONE_NORMAL) + low_kmem_size += z->present_pages; + total_size += z->present_pages; + } + } + if (low_kmem_size && + total_size > average_size && /* ignore small node */ + low_kmem_size > total_size * 70/100) + return ZONELIST_ORDER_NODE; + } + return ZONELIST_ORDER_ZONE; +} + +static void set_zonelist_order(void) +{ + if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) + current_zonelist_order = default_zonelist_order(); + else + current_zonelist_order = user_zonelist_order; +} + +static void build_zonelists(pg_data_t *pgdat) +{ + int j, node, load; + enum zone_type i; + nodemask_t used_mask; + int local_node, prev_node; + struct zonelist *zonelist; + int order = current_zonelist_order; + + /* initialize zonelists */ + for (i = 0; i < MAX_ZONELISTS; i++) { + zonelist = pgdat->node_zonelists + i; + zonelist->_zonerefs[0].zone = NULL; + zonelist->_zonerefs[0].zone_idx = 0; + } + + /* NUMA-aware ordering of nodes */ + local_node = pgdat->node_id; + load = num_online_nodes(); + prev_node = local_node; + nodes_clear(used_mask); + + memset(node_load, 0, sizeof(node_load)); + memset(node_order, 0, sizeof(node_order)); + j = 0; + + while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { + int distance = node_distance(local_node, node); /* * If another node is sufficiently far away then it is better @@ -1753,113 +2247,141 @@ static void __meminit build_zonelists(pg_data_t *pgdat) * So adding penalty to the first node in same * distance group to make it round-robin. */ - if (distance != node_distance(local_node, prev_node)) - node_load[node] += load; + node_load[node] = load; + prev_node = node; load--; - for (i = 0; i < MAX_NR_ZONES; i++) { - zonelist = pgdat->node_zonelists + i; - for (j = 0; zonelist->zones[j] != NULL; j++); + if (order == ZONELIST_ORDER_NODE) + build_zonelists_in_node_order(pgdat, node); + else + node_order[j++] = node; /* remember order */ + } - j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); - zonelist->zones[j] = NULL; - } + if (order == ZONELIST_ORDER_ZONE) { + /* calculate node order -- i.e., DMA last! 
*/ + build_zonelists_in_zone_order(pgdat, j); } + + build_thisnode_zonelists(pgdat); } /* Construct the zonelist performance cache - see further mmzone.h */ -static void __meminit build_zonelist_cache(pg_data_t *pgdat) +static void build_zonelist_cache(pg_data_t *pgdat) { - int i; - - for (i = 0; i < MAX_NR_ZONES; i++) { - struct zonelist *zonelist; - struct zonelist_cache *zlc; - struct zone **z; - - zonelist = pgdat->node_zonelists + i; - zonelist->zlcache_ptr = zlc = &zonelist->zlcache; - bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); - for (z = zonelist->zones; *z; z++) - zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z); - } + struct zonelist *zonelist; + struct zonelist_cache *zlc; + struct zoneref *z; + + zonelist = &pgdat->node_zonelists[0]; + zonelist->zlcache_ptr = zlc = &zonelist->zlcache; + bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); + for (z = zonelist->_zonerefs; z->zone; z++) + zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z); } + #else /* CONFIG_NUMA */ -static void __meminit build_zonelists(pg_data_t *pgdat) +static void set_zonelist_order(void) +{ + current_zonelist_order = ZONELIST_ORDER_ZONE; +} + +static void build_zonelists(pg_data_t *pgdat) { int node, local_node; - enum zone_type i,j; + enum zone_type j; + struct zonelist *zonelist; local_node = pgdat->node_id; - for (i = 0; i < MAX_NR_ZONES; i++) { - struct zonelist *zonelist; - zonelist = pgdat->node_zonelists + i; + zonelist = &pgdat->node_zonelists[0]; + j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); - j = build_zonelists_node(pgdat, zonelist, 0, i); - /* - * Now we build the zonelist so that it contains the zones - * of all the other nodes. - * We don't want to pressure a particular node, so when - * building the zones for node N, we make sure that the - * zones coming right after the local ones are those from - * node N+1 (modulo N) - */ - for (node = local_node + 1; node < MAX_NUMNODES; node++) { - if (!node_online(node)) - continue; - j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); - } - for (node = 0; node < local_node; node++) { - if (!node_online(node)) - continue; - j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); - } - - zonelist->zones[j] = NULL; + /* + * Now we build the zonelist so that it contains the zones + * of all the other nodes. 
+ * We don't want to pressure a particular node, so when + * building the zones for node N, we make sure that the + * zones coming right after the local ones are those from + * node N+1 (modulo N) + */ + for (node = local_node + 1; node < MAX_NUMNODES; node++) { + if (!node_online(node)) + continue; + j = build_zonelists_node(NODE_DATA(node), zonelist, j, + MAX_NR_ZONES - 1); + } + for (node = 0; node < local_node; node++) { + if (!node_online(node)) + continue; + j = build_zonelists_node(NODE_DATA(node), zonelist, j, + MAX_NR_ZONES - 1); } + + zonelist->_zonerefs[j].zone = NULL; + zonelist->_zonerefs[j].zone_idx = 0; } /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ -static void __meminit build_zonelist_cache(pg_data_t *pgdat) +static void build_zonelist_cache(pg_data_t *pgdat) { - int i; - - for (i = 0; i < MAX_NR_ZONES; i++) - pgdat->node_zonelists[i].zlcache_ptr = NULL; + pgdat->node_zonelists[0].zlcache_ptr = NULL; + pgdat->node_zonelists[1].zlcache_ptr = NULL; } #endif /* CONFIG_NUMA */ /* return values int ....just for stop_machine_run() */ -static int __meminit __build_all_zonelists(void *dummy) +static int __build_all_zonelists(void *dummy) { int nid; for_each_online_node(nid) { - build_zonelists(NODE_DATA(nid)); - build_zonelist_cache(NODE_DATA(nid)); + pg_data_t *pgdat = NODE_DATA(nid); + + build_zonelists(pgdat); + build_zonelist_cache(pgdat); } return 0; } -void __meminit build_all_zonelists(void) +void build_all_zonelists(void) { + set_zonelist_order(); + if (system_state == SYSTEM_BOOTING) { __build_all_zonelists(NULL); cpuset_init_current_mems_allowed(); } else { - /* we have to stop all cpus to guaranntee there is no user + /* we have to stop all cpus to guarantee there is no user of zonelist */ stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); /* cpuset refresh routine should be here */ } vm_total_pages = nr_free_pagecache_pages(); - printk("Built %i zonelists. Total pages: %ld\n", - num_online_nodes(), vm_total_pages); + /* + * Disable grouping by mobility if the number of pages in the + * system is too low to allow the mechanism to work. It would be + * more accurate, but expensive to check per-zone. This check is + * made on memory-hotadd so a system can start with mobility + * disabled and enable it later + */ + if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) + page_group_by_mobility_disabled = 1; + else + page_group_by_mobility_disabled = 0; + + printk("Built %i zonelists in %s order, mobility grouping %s. " + "Total pages: %ld\n", + num_online_nodes(), + zonelist_order_name[current_zonelist_order], + page_group_by_mobility_disabled ? "off" : "on", + vm_total_pages); +#ifdef CONFIG_NUMA + printk("Policy zone: %s\n", zone_names[policy_zone]); +#endif } /* @@ -1931,6 +2453,61 @@ static inline unsigned long wait_table_bits(unsigned long size) #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) /* + * Mark a number of pageblocks as MIGRATE_RESERVE. The number + * of blocks reserved is based on zone->pages_min. The memory within the + * reserve will tend to store contiguous free pages. 
Setting min_free_kbytes + * higher will lead to a bigger reserve which will get freed as contiguous + * blocks as reclaim kicks in + */ +static void setup_zone_migrate_reserve(struct zone *zone) +{ + unsigned long start_pfn, pfn, end_pfn; + struct page *page; + unsigned long reserve, block_migratetype; + + /* Get the start pfn, end pfn and the number of blocks to reserve */ + start_pfn = zone->zone_start_pfn; + end_pfn = start_pfn + zone->spanned_pages; + reserve = roundup(zone->pages_min, pageblock_nr_pages) >> + pageblock_order; + + for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { + if (!pfn_valid(pfn)) + continue; + page = pfn_to_page(pfn); + + /* Blocks with reserved pages will never free, skip them. */ + if (PageReserved(page)) + continue; + + block_migratetype = get_pageblock_migratetype(page); + + /* If this block is reserved, account for it */ + if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) { + reserve--; + continue; + } + + /* Suitable for reserving if this block is movable */ + if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) { + set_pageblock_migratetype(page, MIGRATE_RESERVE); + move_freepages_block(zone, page, MIGRATE_RESERVE); + reserve--; + continue; + } + + /* + * If the reserve is met and this is a previous reserved block, + * take it back + */ + if (block_migratetype == MIGRATE_RESERVE) { + set_pageblock_migratetype(page, MIGRATE_MOVABLE); + move_freepages_block(zone, page, MIGRATE_MOVABLE); + } + } +} + +/* * Initially all pages are reserved - free ones are freed * up by free_all_bootmem() once the early boot process is * done. Non-atomic initialization, single-pass. @@ -1941,7 +2518,9 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, struct page *page; unsigned long end_pfn = start_pfn + size; unsigned long pfn; + struct zone *z; + z = &NODE_DATA(nid)->node_zones[zone]; for (pfn = start_pfn; pfn < end_pfn; pfn++) { /* * There can be holes in boot-time mem_map[]s @@ -1959,6 +2538,25 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, init_page_count(page); reset_page_mapcount(page); SetPageReserved(page); + /* + * Mark the block movable so that blocks are reserved for + * movable at startup. This will force kernel allocations + * to reserve their blocks rather than leaking throughout + * the address space during boot when many long-lived + * kernel allocations are made. Later some blocks near + * the start are marked MIGRATE_RESERVE by + * setup_zone_migrate_reserve() + * + * bitmap is created for zone's valid pfn range. but memmap + * can be created for invalid pages (for alignment) + * check here not to call set_pageblock_migratetype() against + * pfn out of zone. + */ + if ((z->zone_start_pfn <= pfn) + && (pfn < z->zone_start_pfn + z->spanned_pages) + && !(pfn & (pageblock_nr_pages - 1))) + set_pageblock_migratetype(page, MIGRATE_MOVABLE); + INIT_LIST_HEAD(&page->lru); #ifdef WANT_PAGE_VIRTUAL /* The shift won't overflow because ZONE_NORMAL is below 4G. 
*/ @@ -1968,12 +2566,11 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, } } -void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, - unsigned long size) +static void __meminit zone_init_free_lists(struct zone *zone) { - int order; - for (order = 0; order < MAX_ORDER ; order++) { - INIT_LIST_HEAD(&zone->free_area[order].free_list); + int order, t; + for_each_migratetype_order(order, t) { + INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); zone->free_area[order].nr_free = 0; } } @@ -1983,7 +2580,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) #endif -static int __cpuinit zone_batchsize(struct zone *zone) +static int zone_batchsize(struct zone *zone) { int batch; @@ -2021,17 +2618,11 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) memset(p, 0, sizeof(*p)); - pcp = &p->pcp[0]; /* hot */ + pcp = &p->pcp; pcp->count = 0; pcp->high = 6 * batch; pcp->batch = max(1UL, 1 * batch); INIT_LIST_HEAD(&pcp->list); - - pcp = &p->pcp[1]; /* cold*/ - pcp->count = 0; - pcp->high = 2 * batch; - pcp->batch = max(1UL, batch/2); - INIT_LIST_HEAD(&pcp->list); } /* @@ -2044,7 +2635,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p, { struct per_cpu_pages *pcp; - pcp = &p->pcp[0]; /* hot list */ + pcp = &p->pcp; pcp->high = high; pcp->batch = max(1UL, high/4); if ((high/4) > (PAGE_SHIFT * 8)) @@ -2079,6 +2670,9 @@ static struct per_cpu_pageset boot_pageset[NR_CPUS]; static int __cpuinit process_zones(int cpu) { struct zone *zone, *dzone; + int node = cpu_to_node(cpu); + + node_set_state(node, N_CPU); /* this node has a cpu */ for_each_zone(zone) { @@ -2086,7 +2680,7 @@ static int __cpuinit process_zones(int cpu) continue; zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), - GFP_KERNEL, cpu_to_node(cpu)); + GFP_KERNEL, node); if (!zone_pcp(zone, cpu)) goto bad; @@ -2100,6 +2694,8 @@ static int __cpuinit process_zones(int cpu) return 0; bad: for_each_zone(dzone) { + if (!populated_zone(dzone)) + continue; if (dzone == zone) break; kfree(zone_pcp(dzone, cpu)); @@ -2165,7 +2761,7 @@ void __init setup_per_cpu_pageset(void) #endif -static __meminit noinline +static noinline __init_refok int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) { int i; @@ -2183,7 +2779,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) alloc_size = zone->wait_table_hash_nr_entries * sizeof(wait_queue_head_t); - if (system_state == SYSTEM_BOOTING) { + if (!slab_is_available()) { zone->wait_table = (wait_queue_head_t *) alloc_bootmem_node(pgdat, alloc_size); } else { @@ -2197,7 +2793,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) * To use this new node's memory, further consideration will be * necessary. */ - zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size); + zone->wait_table = vmalloc(alloc_size); } if (!zone->wait_table) return -ENOMEM; @@ -2241,9 +2837,7 @@ __meminit int init_currently_empty_zone(struct zone *zone, zone->zone_start_pfn = zone_start_pfn; - memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); - - zone_init_free_lists(pgdat, zone, zone->spanned_pages); + zone_init_free_lists(zone); return 0; } @@ -2266,7 +2860,7 @@ static int __meminit first_active_region_index_in_nid(int nid) /* * Basic iterator support. 
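Editor's sketch: with the hot and cold lists merged above, setup_pageset() programs a single per-cpu list whose high watermark is six times the batch size. The struct below is a simplified stand-in, not the kernel's per_cpu_pages, and the batch value is made up.

#include <stdio.h>

struct pcp_sketch {
	int count;
	int high;
	int batch;
};

/* Mirror of the values setup_pageset() now programs into the single list */
static void setup_pageset_sketch(struct pcp_sketch *pcp, int batch)
{
	pcp->count = 0;
	pcp->high  = 6 * batch;
	pcp->batch = batch > 1 ? batch : 1;
}

int main(void)
{
	struct pcp_sketch pcp;

	setup_pageset_sketch(&pcp, 31);	/* hypothetical batch */
	printf("batch=%d high=%d\n", pcp.batch, pcp.high);
	return 0;
}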
Return the next active range of PFNs for a node - * Note: nid == MAX_NUMNODES returns next region regardles of node + * Note: nid == MAX_NUMNODES returns next region regardless of node */ static int __meminit next_active_region_index_in_nid(int index, int nid) { @@ -2284,7 +2878,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid) * was used and there are no special requirements, this is a convenient * alternative */ -int __init early_pfn_to_nid(unsigned long pfn) +int __meminit early_pfn_to_nid(unsigned long pfn) { int i; @@ -2385,7 +2979,7 @@ void __init push_node_boundaries(unsigned int nid, } /* If necessary, push the node boundary out for reserve hotadd */ -static void __init account_node_boundary(unsigned int nid, +static void __meminit account_node_boundary(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn) { printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n", @@ -2405,7 +2999,7 @@ static void __init account_node_boundary(unsigned int nid, void __init push_node_boundaries(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn) {} -static void __init account_node_boundary(unsigned int nid, +static void __meminit account_node_boundary(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn) {} #endif @@ -2433,20 +3027,75 @@ void __meminit get_pfn_range_for_nid(unsigned int nid, *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); } - if (*start_pfn == -1UL) { - printk(KERN_WARNING "Node %u active with no memory\n", nid); + if (*start_pfn == -1UL) *start_pfn = 0; - } /* Push the node boundaries out if requested */ account_node_boundary(nid, start_pfn, end_pfn); } /* + * This finds a zone that can be used for ZONE_MOVABLE pages. The + * assumption is made that zones within a node are ordered in monotonic + * increasing memory addresses so that the "highest" populated zone is used + */ +void __init find_usable_zone_for_movable(void) +{ + int zone_index; + for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { + if (zone_index == ZONE_MOVABLE) + continue; + + if (arch_zone_highest_possible_pfn[zone_index] > + arch_zone_lowest_possible_pfn[zone_index]) + break; + } + + VM_BUG_ON(zone_index == -1); + movable_zone = zone_index; +} + +/* + * The zone ranges provided by the architecture do not include ZONE_MOVABLE + * because it is sized independant of architecture. Unlike the other zones, + * the starting point for ZONE_MOVABLE is not fixed. It may be different + * in each node depending on the size of each node and how evenly kernelcore + * is distributed. This helper function adjusts the zone ranges + * provided by the architecture for a given node by using the end of the + * highest usable zone for ZONE_MOVABLE. 
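Editor's sketch: find_usable_zone_for_movable() walks the zone indexes from the top and picks the highest one with a non-empty PFN range, skipping ZONE_MOVABLE itself. A userspace version with an assumed four-zone layout and made-up PFN ranges (HIGHMEM is empty here, so ZONE_NORMAL is chosen):

#include <stdio.h>

/* assumed simplified zone layout */
enum { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, ZONE_MOVABLE, MAX_NR_ZONES };

int main(void)
{
	/* hypothetical arch-provided ranges; HIGHMEM is empty on this box */
	unsigned long lowest[MAX_NR_ZONES]  = { 0,    4096,   262144, 0 };
	unsigned long highest[MAX_NR_ZONES] = { 4096, 262144, 262144, 0 };
	int zone_index;

	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;
		if (highest[zone_index] > lowest[zone_index])
			break;
	}
	printf("movable_zone = %d (ZONE_NORMAL = %d)\n",
	       zone_index, ZONE_NORMAL);
	return 0;
}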
This preserves the assumption that + * zones within a node are in order of monotonic increases memory addresses + */ +void __meminit adjust_zone_range_for_zone_movable(int nid, + unsigned long zone_type, + unsigned long node_start_pfn, + unsigned long node_end_pfn, + unsigned long *zone_start_pfn, + unsigned long *zone_end_pfn) +{ + /* Only adjust if ZONE_MOVABLE is on this node */ + if (zone_movable_pfn[nid]) { + /* Size ZONE_MOVABLE */ + if (zone_type == ZONE_MOVABLE) { + *zone_start_pfn = zone_movable_pfn[nid]; + *zone_end_pfn = min(node_end_pfn, + arch_zone_highest_possible_pfn[movable_zone]); + + /* Adjust for ZONE_MOVABLE starting within this range */ + } else if (*zone_start_pfn < zone_movable_pfn[nid] && + *zone_end_pfn > zone_movable_pfn[nid]) { + *zone_end_pfn = zone_movable_pfn[nid]; + + /* Check if this whole range is within ZONE_MOVABLE */ + } else if (*zone_start_pfn >= zone_movable_pfn[nid]) + *zone_start_pfn = *zone_end_pfn; + } +} + +/* * Return the number of pages a zone spans in a node, including holes * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() */ -unsigned long __meminit zone_spanned_pages_in_node(int nid, +static unsigned long __meminit zone_spanned_pages_in_node(int nid, unsigned long zone_type, unsigned long *ignored) { @@ -2457,6 +3106,9 @@ unsigned long __meminit zone_spanned_pages_in_node(int nid, get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; + adjust_zone_range_for_zone_movable(nid, zone_type, + node_start_pfn, node_end_pfn, + &zone_start_pfn, &zone_end_pfn); /* Check that this node has pages within the zone's required range */ if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) @@ -2487,11 +3139,11 @@ unsigned long __meminit __absent_pages_in_range(int nid, if (i == -1) return 0; + prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn); + /* Account for ranges before physical memory on this node */ if (early_node_map[i].start_pfn > range_start_pfn) - hole_pages = early_node_map[i].start_pfn - range_start_pfn; - - prev_end_pfn = early_node_map[i].start_pfn; + hole_pages = prev_end_pfn - range_start_pfn; /* Find all holes for the zone within the node */ for (; i != -1; i = next_active_region_index_in_nid(i, nid)) { @@ -2534,7 +3186,7 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn, } /* Return the number of page frames in holes in a zone on a node */ -unsigned long __meminit zone_absent_pages_in_node(int nid, +static unsigned long __meminit zone_absent_pages_in_node(int nid, unsigned long zone_type, unsigned long *ignored) { @@ -2547,18 +3199,21 @@ unsigned long __meminit zone_absent_pages_in_node(int nid, zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], node_end_pfn); + adjust_zone_range_for_zone_movable(nid, zone_type, + node_start_pfn, node_end_pfn, + &zone_start_pfn, &zone_end_pfn); return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); } #else -static inline unsigned long zone_spanned_pages_in_node(int nid, +static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, unsigned long zone_type, unsigned long *zones_size) { return zones_size[zone_type]; } -static inline unsigned long zone_absent_pages_in_node(int nid, +static inline unsigned long __meminit zone_absent_pages_in_node(int nid, unsigned long zone_type, unsigned long *zholes_size) { @@ -2591,13 +3246,88 @@ static void __meminit 
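Editor's sketch: the clamping that adjust_zone_range_for_zone_movable() applies to the ordinary zones can be shown on its own. This sketch covers only the straddling and fully-movable cases (it leaves out the sizing of ZONE_MOVABLE itself); all PFN values are invented.

#include <stdio.h>

/* Clamp a real zone's [start, end) pfn range against the per-node start
 * of ZONE_MOVABLE, in the spirit of adjust_zone_range_for_zone_movable(). */
static void clamp_against_movable(unsigned long *start, unsigned long *end,
				  unsigned long movable_start)
{
	if (!movable_start)
		return;			/* no ZONE_MOVABLE on this node */
	if (*start < movable_start && *end > movable_start)
		*end = movable_start;	/* zone straddles the boundary */
	else if (*start >= movable_start)
		*start = *end;		/* zone lies entirely in MOVABLE */
}

int main(void)
{
	unsigned long s = 4096, e = 262144;

	clamp_against_movable(&s, &e, 131072);	/* hypothetical boundary */
	printf("clamped range: %lu -> %lu\n", s, e);
	return 0;
}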
calculate_node_totalpages(struct pglist_data *pgdat, realtotalpages); } +#ifndef CONFIG_SPARSEMEM +/* + * Calculate the size of the zone->blockflags rounded to an unsigned long + * Start by making sure zonesize is a multiple of pageblock_order by rounding + * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally + * round what is now in bits to nearest long in bits, then return it in + * bytes. + */ +static unsigned long __init usemap_size(unsigned long zonesize) +{ + unsigned long usemapsize; + + usemapsize = roundup(zonesize, pageblock_nr_pages); + usemapsize = usemapsize >> pageblock_order; + usemapsize *= NR_PAGEBLOCK_BITS; + usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); + + return usemapsize / 8; +} + +static void __init setup_usemap(struct pglist_data *pgdat, + struct zone *zone, unsigned long zonesize) +{ + unsigned long usemapsize = usemap_size(zonesize); + zone->pageblock_flags = NULL; + if (usemapsize) { + zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); + memset(zone->pageblock_flags, 0, usemapsize); + } +} +#else +static void inline setup_usemap(struct pglist_data *pgdat, + struct zone *zone, unsigned long zonesize) {} +#endif /* CONFIG_SPARSEMEM */ + +#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE + +/* Return a sensible default order for the pageblock size. */ +static inline int pageblock_default_order(void) +{ + if (HPAGE_SHIFT > PAGE_SHIFT) + return HUGETLB_PAGE_ORDER; + + return MAX_ORDER-1; +} + +/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ +static inline void __init set_pageblock_order(unsigned int order) +{ + /* Check that pageblock_nr_pages has not already been setup */ + if (pageblock_order) + return; + + /* + * Assume the largest contiguous order of interest is a huge page. + * This value may be variable depending on boot parameters on IA64 + */ + pageblock_order = order; +} +#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ + +/* + * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() + * and pageblock_default_order() are unused as pageblock_order is set + * at compile-time. See include/linux/pageblock-flags.h for the values of + * pageblock_order based on the kernel config + */ +static inline int pageblock_default_order(unsigned int order) +{ + return MAX_ORDER-1; +} +#define set_pageblock_order(x) do {} while (0) + +#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ + /* * Set up the zone data structures: * - mark all pages reserved * - mark all memory queues empty * - clear the memory bitmaps */ -static void __meminit free_area_init_core(struct pglist_data *pgdat, +static void __paginginit free_area_init_core(struct pglist_data *pgdat, unsigned long *zones_size, unsigned long *zholes_size) { enum zone_type j; @@ -2623,7 +3353,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat, * is used by this zone for memmap. 
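Editor's sketch: usemap_size() above is pure arithmetic: round the zone up to whole pageblocks, allow NR_PAGEBLOCK_BITS per block, then round up to whole longs and return bytes. A worked example assuming pageblock_order 9 and 4 bits per pageblock (both assumed; the real values depend on the config):

#include <stdio.h>

#define PAGEBLOCK_ORDER		9		/* assumed */
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)
#define NR_PAGEBLOCK_BITS	4		/* assumed bits per pageblock */

static unsigned long roundup_ul(unsigned long x, unsigned long y)
{
	return ((x + y - 1) / y) * y;
}

/* same steps as usemap_size() */
static unsigned long usemap_size_sketch(unsigned long zonesize)
{
	unsigned long usemapsize;

	usemapsize = roundup_ul(zonesize, PAGEBLOCK_NR_PAGES);
	usemapsize >>= PAGEBLOCK_ORDER;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup_ul(usemapsize, 8 * sizeof(unsigned long));
	return usemapsize / 8;
}

int main(void)
{
	/* e.g. a 1GB zone of 4K pages = 262144 pages */
	printf("usemap for 262144 pages: %lu bytes\n",
	       usemap_size_sketch(262144));
	return 0;
}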
This affects the watermark * and per-cpu initialisations */ - memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT; + memmap_pages = + PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT; if (realsize >= memmap_pages) { realsize -= memmap_pages; printk(KERN_DEBUG @@ -2667,18 +3398,21 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat, zone->nr_scan_active = 0; zone->nr_scan_inactive = 0; zap_zone_vm_stats(zone); - atomic_set(&zone->reclaim_in_progress, 0); + zone->flags = 0; if (!size) continue; + set_pageblock_order(pageblock_default_order()); + setup_usemap(pgdat, zone, size); ret = init_currently_empty_zone(zone, zone_start_pfn, size, MEMMAP_EARLY); BUG_ON(ret); + memmap_init(size, nid, j, zone_start_pfn); zone_start_pfn += size; } } -static void __meminit alloc_node_mem_map(struct pglist_data *pgdat) +static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) { /* Skip empty nodes */ if (!pgdat->node_spanned_pages) @@ -2704,7 +3438,7 @@ static void __meminit alloc_node_mem_map(struct pglist_data *pgdat) map = alloc_bootmem_node(pgdat, size); pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); } -#ifdef CONFIG_FLATMEM +#ifndef CONFIG_NEED_MULTIPLE_NODES /* * With no DISCONTIG, the global mem_map is just set as node 0's */ @@ -2712,14 +3446,14 @@ static void __meminit alloc_node_mem_map(struct pglist_data *pgdat) mem_map = NODE_DATA(0)->node_mem_map; #ifdef CONFIG_ARCH_POPULATES_NODE_MAP if (page_to_pfn(mem_map) != pgdat->node_start_pfn) - mem_map -= pgdat->node_start_pfn; + mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ } #endif #endif /* CONFIG_FLAT_NODE_MEM_MAP */ } -void __meminit free_area_init_node(int nid, struct pglist_data *pgdat, +void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat, unsigned long *zones_size, unsigned long node_start_pfn, unsigned long *zholes_size) { @@ -2733,6 +3467,26 @@ void __meminit free_area_init_node(int nid, struct pglist_data *pgdat, } #ifdef CONFIG_ARCH_POPULATES_NODE_MAP + +#if MAX_NUMNODES > 1 +/* + * Figure out the number of possible node ids. + */ +static void __init setup_nr_node_ids(void) +{ + unsigned int node; + unsigned int highest = 0; + + for_each_node_mask(node, node_possible_map) + highest = node; + nr_node_ids = highest + 1; +} +#else +static inline void setup_nr_node_ids(void) +{ +} +#endif + /** * add_active_range - Register a range of PFNs backed by physical memory * @nid: The node ID the range resides on @@ -2904,6 +3658,179 @@ unsigned long __init find_max_pfn_with_active_regions(void) return max_pfn; } +/* + * early_calculate_totalpages() + * Sum pages in active regions for movable zone. + * Populate N_HIGH_MEMORY for calculating usable_nodes. + */ +static unsigned long __init early_calculate_totalpages(void) +{ + int i; + unsigned long totalpages = 0; + + for (i = 0; i < nr_nodemap_entries; i++) { + unsigned long pages = early_node_map[i].end_pfn - + early_node_map[i].start_pfn; + totalpages += pages; + if (pages) + node_set_state(early_node_map[i].nid, N_HIGH_MEMORY); + } + return totalpages; +} + +/* + * Find the PFN the Movable zone begins in each node. Kernel memory + * is spread evenly between nodes as long as the nodes have enough + * memory. 
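Editor's sketch: earlier in this hunk, free_area_init_core() switches to PAGE_ALIGN() when charging the memmap against a zone, so a partially filled final page of struct pages is no longer ignored. A small worked example with assumed numbers (4K pages, a 32-byte struct page, 100000 pages in the zone):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long spanned_pages = 100000;	/* hypothetical zone size */
	unsigned long sizeof_page   = 32;	/* assumed sizeof(struct page) */
	unsigned long bytes = spanned_pages * sizeof_page;

	printf("old accounting: %lu pages\n", bytes >> PAGE_SHIFT);
	printf("new accounting: %lu pages\n", PAGE_ALIGN(bytes) >> PAGE_SHIFT);
	return 0;
}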
When they don't, some nodes will have more kernelcore than + * others + */ +void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) +{ + int i, nid; + unsigned long usable_startpfn; + unsigned long kernelcore_node, kernelcore_remaining; + unsigned long totalpages = early_calculate_totalpages(); + int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]); + + /* + * If movablecore was specified, calculate what size of + * kernelcore that corresponds so that memory usable for + * any allocation type is evenly spread. If both kernelcore + * and movablecore are specified, then the value of kernelcore + * will be used for required_kernelcore if it's greater than + * what movablecore would have allowed. + */ + if (required_movablecore) { + unsigned long corepages; + + /* + * Round-up so that ZONE_MOVABLE is at least as large as what + * was requested by the user + */ + required_movablecore = + roundup(required_movablecore, MAX_ORDER_NR_PAGES); + corepages = totalpages - required_movablecore; + + required_kernelcore = max(required_kernelcore, corepages); + } + + /* If kernelcore was not specified, there is no ZONE_MOVABLE */ + if (!required_kernelcore) + return; + + /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ + find_usable_zone_for_movable(); + usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; + +restart: + /* Spread kernelcore memory as evenly as possible throughout nodes */ + kernelcore_node = required_kernelcore / usable_nodes; + for_each_node_state(nid, N_HIGH_MEMORY) { + /* + * Recalculate kernelcore_node if the division per node + * now exceeds what is necessary to satisfy the requested + * amount of memory for the kernel + */ + if (required_kernelcore < kernelcore_node) + kernelcore_node = required_kernelcore / usable_nodes; + + /* + * As the map is walked, we track how much memory is usable + * by the kernel using kernelcore_remaining. When it is + * 0, the rest of the node is usable by ZONE_MOVABLE + */ + kernelcore_remaining = kernelcore_node; + + /* Go through each range of PFNs within this node */ + for_each_active_range_index_in_nid(i, nid) { + unsigned long start_pfn, end_pfn; + unsigned long size_pages; + + start_pfn = max(early_node_map[i].start_pfn, + zone_movable_pfn[nid]); + end_pfn = early_node_map[i].end_pfn; + if (start_pfn >= end_pfn) + continue; + + /* Account for what is only usable for kernelcore */ + if (start_pfn < usable_startpfn) { + unsigned long kernel_pages; + kernel_pages = min(end_pfn, usable_startpfn) + - start_pfn; + + kernelcore_remaining -= min(kernel_pages, + kernelcore_remaining); + required_kernelcore -= min(kernel_pages, + required_kernelcore); + + /* Continue if range is now fully accounted */ + if (end_pfn <= usable_startpfn) { + + /* + * Push zone_movable_pfn to the end so + * that if we have to rebalance + * kernelcore across nodes, we will + * not double account here + */ + zone_movable_pfn[nid] = end_pfn; + continue; + } + start_pfn = usable_startpfn; + } + + /* + * The usable PFN range for ZONE_MOVABLE is from + * start_pfn->end_pfn. 
Calculate size_pages as the + * number of pages used as kernelcore + */ + size_pages = end_pfn - start_pfn; + if (size_pages > kernelcore_remaining) + size_pages = kernelcore_remaining; + zone_movable_pfn[nid] = start_pfn + size_pages; + + /* + * Some kernelcore has been met, update counts and + * break if the kernelcore for this node has been + * satisified + */ + required_kernelcore -= min(required_kernelcore, + size_pages); + kernelcore_remaining -= size_pages; + if (!kernelcore_remaining) + break; + } + } + + /* + * If there is still required_kernelcore, we do another pass with one + * less node in the count. This will push zone_movable_pfn[nid] further + * along on the nodes that still have memory until kernelcore is + * satisified + */ + usable_nodes--; + if (usable_nodes && required_kernelcore > usable_nodes) + goto restart; + + /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ + for (nid = 0; nid < MAX_NUMNODES; nid++) + zone_movable_pfn[nid] = + roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); +} + +/* Any regular memory on that node ? */ +static void check_for_regular_memory(pg_data_t *pgdat) +{ +#ifdef CONFIG_HIGHMEM + enum zone_type zone_type; + + for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) { + struct zone *zone = &pgdat->node_zones[zone_type]; + if (zone->present_pages) + node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY); + } +#endif +} + /** * free_area_init_nodes - Initialise all pg_data_t and zone data * @max_zone_pfn: an array of max PFNs for each zone @@ -2933,19 +3860,37 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; for (i = 1; i < MAX_NR_ZONES; i++) { + if (i == ZONE_MOVABLE) + continue; arch_zone_lowest_possible_pfn[i] = arch_zone_highest_possible_pfn[i-1]; arch_zone_highest_possible_pfn[i] = max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); } + arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; + arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; + + /* Find the PFNs that ZONE_MOVABLE begins at in each node */ + memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); + find_zone_movable_pfns_for_nodes(zone_movable_pfn); /* Print out the zone ranges */ printk("Zone PFN ranges:\n"); - for (i = 0; i < MAX_NR_ZONES; i++) + for (i = 0; i < MAX_NR_ZONES; i++) { + if (i == ZONE_MOVABLE) + continue; printk(" %-8s %8lu -> %8lu\n", zone_names[i], arch_zone_lowest_possible_pfn[i], arch_zone_highest_possible_pfn[i]); + } + + /* Print out the PFNs ZONE_MOVABLE begins at in each node */ + printk("Movable zone start PFN for each node\n"); + for (i = 0; i < MAX_NUMNODES; i++) { + if (zone_movable_pfn[i]) + printk(" Node %d: %lu\n", i, zone_movable_pfn[i]); + } /* Print out the early_node_map[] */ printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); @@ -2960,8 +3905,50 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) pg_data_t *pgdat = NODE_DATA(nid); free_area_init_node(nid, pgdat, NULL, find_min_pfn_for_node(nid), NULL); + + /* Any memory on that node */ + if (pgdat->node_present_pages) + node_set_state(nid, N_HIGH_MEMORY); + check_for_regular_memory(pgdat); } } + +static int __init cmdline_parse_core(char *p, unsigned long *core) +{ + unsigned long long coremem; + if (!p) + return -EINVAL; + + coremem = memparse(p, &p); + *core = coremem >> PAGE_SHIFT; + + /* Paranoid check that UL is enough for the coremem value */ + WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); + + 
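Editor's sketch: find_zone_movable_pfns_for_nodes() spreads the requested kernelcore evenly over the nodes that have memory and restarts with one node fewer whenever a small node cannot take its full share. The program below models only that spreading policy with made-up per-node capacities; the real loop works PFN range by PFN range and aligns its result to MAX_ORDER_NR_PAGES.

#include <stdio.h>

int main(void)
{
	unsigned long capacity[] = { 25600, 131072, 131072 }; /* hypothetical */
	unsigned long taken[3] = { 0, 0, 0 };
	unsigned long required = 153600;	/* kernelcore still to place */
	int usable_nodes = 3;
	int nid;

	while (required && usable_nodes) {
		unsigned long share = required / usable_nodes;
		unsigned long placed = 0;

		for (nid = 0; nid < 3; nid++) {
			unsigned long room = capacity[nid] - taken[nid];
			unsigned long want = share < room ? share : room;

			taken[nid] += want;
			placed += want;
		}
		required -= placed;
		usable_nodes--;		/* mimic the "restart" with one less node */
	}

	for (nid = 0; nid < 3; nid++)
		printf("node %d: %lu pages of kernelcore\n", nid, taken[nid]);
	return 0;
}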
return 0; +} + +/* + * kernelcore=size sets the amount of memory for use for allocations that + * cannot be reclaimed or migrated. + */ +static int __init cmdline_parse_kernelcore(char *p) +{ + return cmdline_parse_core(p, &required_kernelcore); +} + +/* + * movablecore=size sets the amount of memory for use for allocations that + * can be reclaimed or migrated. + */ +static int __init cmdline_parse_movablecore(char *p) +{ + return cmdline_parse_core(p, &required_movablecore); +} + +early_param("kernelcore", cmdline_parse_kernelcore); +early_param("movablecore", cmdline_parse_movablecore); + #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ /** @@ -2999,10 +3986,23 @@ static int page_alloc_cpu_notify(struct notifier_block *self, int cpu = (unsigned long)hcpu; if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { - local_irq_disable(); - __drain_pages(cpu); + drain_pages(cpu); + + /* + * Spill the event counters of the dead processor + * into the current processors event counters. + * This artificially elevates the count of the current + * processor. + */ vm_events_fold_cpu(cpu); - local_irq_enable(); + + /* + * Zero the differential counters of the dead processor + * so that the vm statistics are consistent. + * + * This is only okay since the processor is dead and cannot + * race with what we are doing. + */ refresh_cpu_vm_stats(cpu); } return NOTIFY_OK; @@ -3137,6 +4137,7 @@ void setup_per_zone_pages_min(void) zone->pages_low = zone->pages_min + (tmp >> 2); zone->pages_high = zone->pages_min + (tmp >> 1); + setup_zone_migrate_reserve(zone); spin_unlock_irqrestore(&zone->lru_lock, flags); } @@ -3346,17 +4347,30 @@ void *__init alloc_large_system_hash(const char *tablename, else if (hashdist) table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); else { - unsigned long order; - for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++) - ; + unsigned long order = get_order(size); table = (void*) __get_free_pages(GFP_ATOMIC, order); + /* + * If bucketsize is not a power-of-two, we may free + * some pages at the end of hash table. 
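Editor's sketch: cmdline_parse_core() relies on memparse() to accept suffixed sizes on the command line and then converts bytes to pages. Below is a userspace stand-in for that conversion; parse_size() handles only K/M/G suffixes and is not the kernel helper, and a 4K page size is assumed.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12	/* assumed 4K pages */

/* Stand-in for memparse(): parse a size with an optional K/M/G suffix. */
static unsigned long long parse_size(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10;	break;
	default: break;
	}
	return v;
}

int main(void)
{
	const char *arg = "512M";	/* e.g. kernelcore=512M */
	unsigned long long bytes = parse_size(arg);

	printf("%s -> %llu bytes -> %llu pages\n",
	       arg, bytes, bytes >> PAGE_SHIFT);
	return 0;
}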
+ */ + if (table) { + unsigned long alloc_end = (unsigned long)table + + (PAGE_SIZE << order); + unsigned long used = (unsigned long)table + + PAGE_ALIGN(size); + split_page(virt_to_page(table), order); + while (used < alloc_end) { + free_page(used); + used += PAGE_SIZE; + } + } } } while (!table && size > PAGE_SIZE && --log2qty); if (!table) panic("Failed to allocate %s hash table\n", tablename); - printk("%s hash table entries: %d (order: %d, %lu bytes)\n", + printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n", tablename, (1U << log2qty), ilog2(size) - PAGE_SHIFT, @@ -3383,4 +4397,171 @@ EXPORT_SYMBOL(pfn_to_page); EXPORT_SYMBOL(page_to_pfn); #endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ +/* Return a pointer to the bitmap storing bits affecting a block of pages */ +static inline unsigned long *get_pageblock_bitmap(struct zone *zone, + unsigned long pfn) +{ +#ifdef CONFIG_SPARSEMEM + return __pfn_to_section(pfn)->pageblock_flags; +#else + return zone->pageblock_flags; +#endif /* CONFIG_SPARSEMEM */ +} + +static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) +{ +#ifdef CONFIG_SPARSEMEM + pfn &= (PAGES_PER_SECTION-1); + return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; +#else + pfn = pfn - zone->zone_start_pfn; + return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; +#endif /* CONFIG_SPARSEMEM */ +} + +/** + * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages + * @page: The page within the block of interest + * @start_bitidx: The first bit of interest to retrieve + * @end_bitidx: The last bit of interest + * returns pageblock_bits flags + */ +unsigned long get_pageblock_flags_group(struct page *page, + int start_bitidx, int end_bitidx) +{ + struct zone *zone; + unsigned long *bitmap; + unsigned long pfn, bitidx; + unsigned long flags = 0; + unsigned long value = 1; + + zone = page_zone(page); + pfn = page_to_pfn(page); + bitmap = get_pageblock_bitmap(zone, pfn); + bitidx = pfn_to_bitidx(zone, pfn); + + for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) + if (test_bit(bitidx + start_bitidx, bitmap)) + flags |= value; + + return flags; +} +/** + * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages + * @page: The page within the block of interest + * @start_bitidx: The first bit of interest + * @end_bitidx: The last bit of interest + * @flags: The flags to set + */ +void set_pageblock_flags_group(struct page *page, unsigned long flags, + int start_bitidx, int end_bitidx) +{ + struct zone *zone; + unsigned long *bitmap; + unsigned long pfn, bitidx; + unsigned long value = 1; + + zone = page_zone(page); + pfn = page_to_pfn(page); + bitmap = get_pageblock_bitmap(zone, pfn); + bitidx = pfn_to_bitidx(zone, pfn); + VM_BUG_ON(pfn < zone->zone_start_pfn); + VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages); + + for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) + if (flags & value) + __set_bit(bitidx + start_bitidx, bitmap); + else + __clear_bit(bitidx + start_bitidx, bitmap); +} + +/* + * This is designed as sub function...plz see page_isolation.c also. + * set/clear page block's type to be ISOLATE. + * page allocater never alloc memory from ISOLATE block. + */ + +int set_migratetype_isolate(struct page *page) +{ + struct zone *zone; + unsigned long flags; + int ret = -EBUSY; + + zone = page_zone(page); + spin_lock_irqsave(&zone->lock, flags); + /* + * In future, more migrate types will be able to be isolation target. 
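Editor's sketch: pfn_to_bitidx() maps a pfn to the first of the NR_PAGEBLOCK_BITS bits that describe its pageblock. The flat (non-SPARSEMEM) flavour is reproduced below as a standalone sketch; pageblock_order 9, 4 bits per block and the pfn values are all assumptions.

#include <stdio.h>

#define PAGEBLOCK_ORDER		9	/* assumed; 2MB blocks with 4K pages */
#define NR_PAGEBLOCK_BITS	4	/* assumed bits recorded per pageblock */

/* Flat flavour of pfn_to_bitidx(): the pfn offset into the zone selects a
 * pageblock, and each pageblock owns NR_PAGEBLOCK_BITS consecutive bits
 * in zone->pageblock_flags. */
static unsigned long pfn_to_bitidx_sketch(unsigned long zone_start_pfn,
					  unsigned long pfn)
{
	return ((pfn - zone_start_pfn) >> PAGEBLOCK_ORDER) * NR_PAGEBLOCK_BITS;
}

int main(void)
{
	unsigned long zone_start_pfn = 4096;	/* hypothetical zone start */
	unsigned long pfn = 4096 + 3 * 512 + 17; /* somewhere in block 3 */

	printf("pfn %lu -> bit index %lu\n",
	       pfn, pfn_to_bitidx_sketch(zone_start_pfn, pfn));
	return 0;
}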
+ */ + if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE) + goto out; + set_pageblock_migratetype(page, MIGRATE_ISOLATE); + move_freepages_block(zone, page, MIGRATE_ISOLATE); + ret = 0; +out: + spin_unlock_irqrestore(&zone->lock, flags); + if (!ret) + drain_all_pages(); + return ret; +} + +void unset_migratetype_isolate(struct page *page) +{ + struct zone *zone; + unsigned long flags; + zone = page_zone(page); + spin_lock_irqsave(&zone->lock, flags); + if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) + goto out; + set_pageblock_migratetype(page, MIGRATE_MOVABLE); + move_freepages_block(zone, page, MIGRATE_MOVABLE); +out: + spin_unlock_irqrestore(&zone->lock, flags); +} + +#ifdef CONFIG_MEMORY_HOTREMOVE +/* + * All pages in the range must be isolated before calling this. + */ +void +__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) +{ + struct page *page; + struct zone *zone; + int order, i; + unsigned long pfn; + unsigned long flags; + /* find the first valid pfn */ + for (pfn = start_pfn; pfn < end_pfn; pfn++) + if (pfn_valid(pfn)) + break; + if (pfn == end_pfn) + return; + zone = page_zone(pfn_to_page(pfn)); + spin_lock_irqsave(&zone->lock, flags); + pfn = start_pfn; + while (pfn < end_pfn) { + if (!pfn_valid(pfn)) { + pfn++; + continue; + } + page = pfn_to_page(pfn); + BUG_ON(page_count(page)); + BUG_ON(!PageBuddy(page)); + order = page_order(page); +#ifdef CONFIG_DEBUG_VM + printk(KERN_INFO "remove from free list %lx %d %lx\n", + pfn, 1 << order, end_pfn); +#endif + list_del(&page->lru); + rmv_page_order(page); + zone->free_area[order].nr_free--; + __mod_zone_page_state(zone, NR_FREE_PAGES, + - (1UL << order)); + for (i = 0; i < (1 << order); i++) + SetPageReserved((page+i)); + pfn += (1 << order); + } + spin_unlock_irqrestore(&zone->lock, flags); +} +#endif
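Editor's sketch: __offline_isolated_pages() walks the isolated range in buddy-sized steps, removing 1 << order pages at a time. A toy walk over an invented range with invented block orders, just to show how the pfn cursor advances:

#include <stdio.h>

int main(void)
{
	unsigned long start_pfn = 1024, end_pfn = 1024 + 32;
	int orders[] = { 3, 3, 4 };	/* hypothetical buddy block sizes */
	unsigned long pfn = start_pfn;
	int i = 0;

	while (pfn < end_pfn) {
		int order = orders[i++];

		printf("remove block at pfn %lu, %d pages\n", pfn, 1 << order);
		pfn += 1UL << order;
	}
	return 0;
}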