diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index cba36ef..c083cf5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -22,13 +22,15 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
+#include 
 #include 
+#include 
 
+#include "internal.h"
+
 /* add this memory to iomem resource */
 static struct resource *register_memory_resource(u64 start, u64 size)
 {
@@ -60,9 +62,9 @@ static void release_memory_resource(struct resource *res)
 
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-static void get_page_bootmem(unsigned long info, struct page *page, int magic)
+static void get_page_bootmem(unsigned long info, struct page *page, int type)
 {
- atomic_set(&page->_mapcount, magic);
+ atomic_set(&page->_mapcount, type);
  SetPagePrivate(page);
  set_page_private(page, info);
  atomic_inc(&page->_count);
@@ -70,10 +72,10 @@ static void get_page_bootmem(unsigned long info, struct page *page, int magic)
 
 void put_page_bootmem(struct page *page)
 {
- int magic;
+ int type;
 
- magic = atomic_read(&page->_mapcount);
- BUG_ON(magic >= -1);
+ type = atomic_read(&page->_mapcount);
+ BUG_ON(type >= -1);
 
  if (atomic_dec_return(&page->_count) == 1) {
   ClearPagePrivate(page);
@@ -84,7 +86,7 @@ void put_page_bootmem(struct page *page)
 
 }
 
-void register_page_bootmem_info_section(unsigned long start_pfn)
+static void register_page_bootmem_info_section(unsigned long start_pfn)
 {
  unsigned long *usemap, mapsize, section_nr, i;
  struct mem_section *ms;
@@ -117,7 +119,7 @@ void register_page_bootmem_info_section(unsigned long start_pfn)
  mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
 
  for (i = 0; i < mapsize; i++, page++)
-  get_page_bootmem(section_nr, page, MIX_INFO);
+  get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
 
 }
 
@@ -157,27 +159,65 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
-static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
+static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
+   unsigned long end_pfn)
+{
+ unsigned long old_zone_end_pfn;
+
+ zone_span_writelock(zone);
+
+ old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ if (start_pfn < zone->zone_start_pfn)
+  zone->zone_start_pfn = start_pfn;
+
+ zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
+    zone->zone_start_pfn;
+
+ zone_span_writeunlock(zone);
+}
+
+static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
+   unsigned long end_pfn)
+{
+ unsigned long old_pgdat_end_pfn =
+  pgdat->node_start_pfn + pgdat->node_spanned_pages;
+
+ if (start_pfn < pgdat->node_start_pfn)
+  pgdat->node_start_pfn = start_pfn;
+
+ pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
+     pgdat->node_start_pfn;
+}
+
+static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
  struct pglist_data *pgdat = zone->zone_pgdat;
  int nr_pages = PAGES_PER_SECTION;
  int nid = pgdat->node_id;
  int zone_type;
+ unsigned long flags;
 
  zone_type = zone - pgdat->node_zones;
  if (!zone->wait_table) {
-  int ret = 0;
+  int ret;
+
   ret = init_currently_empty_zone(zone, phys_start_pfn,
      nr_pages, MEMMAP_HOTPLUG);
-  if (ret < 0)
+  if (ret)
    return ret;
  }
+ pgdat_resize_lock(zone->zone_pgdat, &flags);
+ grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
+ grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
+   phys_start_pfn + nr_pages);
+ pgdat_resize_unlock(zone->zone_pgdat, &flags);
  memmap_init_zone(nr_pages, nid, zone_type,
    phys_start_pfn, MEMMAP_HOTPLUG);
  return 0;
 }
 
-static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
+static int __meminit __add_section(int nid, struct zone *zone,
+   unsigned long phys_start_pfn)
 {
  int nr_pages = PAGES_PER_SECTION;
  int ret;
@@ -195,9 +235,19 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
  if (ret < 0)
   return ret;
 
- return register_new_memory(__pfn_to_section(phys_start_pfn));
+ return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
 }
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+ /*
+  * XXX: Freeing memmap with vmemmap is not implement yet.
+  *      This should be removed later.
+  */
+ return -EBUSY;
+}
+#else
 static int __remove_section(struct zone *zone, struct mem_section *ms)
 {
  unsigned long flags;
@@ -216,6 +266,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms)
  pgdat_resize_unlock(pgdat, &flags);
  return 0;
 }
+#endif
 
 /*
  * Reasonably generic function for adding memory.  It is
@@ -223,8 +274,8 @@ static int __remove_section(struct zone *zone, struct mem_section *ms)
  * call this function after deciding the zone to which to
  * add the new pages.
  */
-int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
-  unsigned long nr_pages)
+int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
+  unsigned long nr_pages)
 {
  unsigned long i;
  int err = 0;
@@ -234,7 +285,7 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
  end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
 
  for (i = start_sec; i <= end_sec; i++) {
-  err = __add_section(zone, i << PFN_SECTION_SHIFT);
+  err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);
 
   /*
    * EEXIST is finally dealt with by ioresource collision
@@ -273,11 +324,11 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
  BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
  BUG_ON(nr_pages % PAGES_PER_SECTION);
 
- release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);
-
  sections_to_remove = nr_pages / PAGES_PER_SECTION;
  for (i = 0; i < sections_to_remove; i++) {
   unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+  release_mem_region(pfn << PAGE_SHIFT,
+    PAGES_PER_SECTION << PAGE_SHIFT);
   ret = __remove_section(zone, __pfn_to_section(pfn));
   if (ret)
    break;
@@ -286,36 +337,6 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
 }
 EXPORT_SYMBOL_GPL(__remove_pages);
 
-static void grow_zone_span(struct zone *zone,
-  unsigned long start_pfn, unsigned long end_pfn)
-{
- unsigned long old_zone_end_pfn;
-
- zone_span_writelock(zone);
-
- old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
- if (start_pfn < zone->zone_start_pfn)
-  zone->zone_start_pfn = start_pfn;
-
- zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
-    zone->zone_start_pfn;
-
- zone_span_writeunlock(zone);
-}
-
-static void grow_pgdat_span(struct pglist_data *pgdat,
-  unsigned long start_pfn, unsigned long end_pfn)
-{
- unsigned long old_pgdat_end_pfn =
-  pgdat->node_start_pfn + pgdat->node_spanned_pages;
-
- if (start_pfn < pgdat->node_start_pfn)
-  pgdat->node_start_pfn = start_pfn;
-
- pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
-     pgdat->node_start_pfn;
-}
-
 void online_page(struct page *page)
 {
  totalram_pages++;
@@ -354,7 +375,6 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 
 int online_pages(unsigned long pfn, unsigned long nr_pages)
 {
- unsigned long flags;
  unsigned long onlined_pages = 0;
  struct zone *zone;
  int need_zonelists_rebuild = 0;
@@ -382,11 +402,6 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
  * memory_block->state_mutex.
  */
  zone = page_zone(pfn_to_page(pfn));
- pgdat_resize_lock(zone->zone_pgdat, &flags);
- grow_zone_span(zone, pfn, pfn + nr_pages);
- grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
- pgdat_resize_unlock(zone->zone_pgdat, &flags);
-
  /*
  * If this zone is not populated, then it is not in zonelist.
  * This means the page allocator ignores this zone.
@@ -395,8 +410,15 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
  if (!populated_zone(zone))
   need_zonelists_rebuild = 1;
 
- walk_memory_resource(pfn, nr_pages, &onlined_pages,
+ ret = walk_memory_resource(pfn, nr_pages, &onlined_pages,
   online_pages_range);
+ if (ret) {
+  printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
+   nr_pages, pfn);
+  memory_notify(MEM_CANCEL_ONLINE, &arg);
+  return ret;
+ }
+
  zone->present_pages += onlined_pages;
  zone->zone_pgdat->node_present_pages += onlined_pages;
 
@@ -408,7 +430,9 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 
  if (need_zonelists_rebuild)
   build_all_zonelists();
- vm_total_pages = nr_free_pagecache_pages();
+ else
+  vm_total_pages = nr_free_pagecache_pages();
+
  writeback_set_ratelimit();
 
  if (onlined_pages)
@@ -434,7 +458,7 @@ static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
  /* we can use NODE_DATA(nid) from here */
 
  /* init node's zones as empty zones, we don't have any present pages.*/
- free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);
+ free_area_init_node(nid, zones_size, start_pfn, zholes_size);
 
  return pgdat;
 }
@@ -447,7 +471,8 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 }
 
 
-int add_memory(int nid, u64 start, u64 size)
+/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+int __ref add_memory(int nid, u64 start, u64 size)
 {
  pg_data_t *pgdat = NULL;
  int new_pgdat = 0;
@@ -474,8 +499,6 @@ int add_memory(int nid, u64 start, u64 size)
  /* we online node here. we can't roll back from here. */
  node_set_online(nid);
 
- cpuset_track_online_nodes();
-
  if (new_pgdat) {
   ret = register_one_node(nid);
   /*
@@ -500,6 +523,66 @@ EXPORT_SYMBOL_GPL(add_memory);
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
+ * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
+ * set and the size of the free page is given by page_order(). Using this,
+ * the function determines if the pageblock contains only free pages.
+ * Due to buddy contraints, a free page at least the size of a pageblock will
+ * be located at the start of the pageblock
+ */
+static inline int pageblock_free(struct page *page)
+{
+ return PageBuddy(page) && page_order(page) >= pageblock_order;
+}
+
+/* Return the start of the next active pageblock after a given page */
+static struct page *next_active_pageblock(struct page *page)
+{
+ int pageblocks_stride;
+
+ /* Ensure the starting page is pageblock-aligned */
+ BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+
+ /* Move forward by at least 1 * pageblock_nr_pages */
+ pageblocks_stride = 1;
+
+ /* If the entire pageblock is free, move to the end of free page */
+ if (pageblock_free(page))
+  pageblocks_stride += page_order(page) - pageblock_order;
+
+ return page + (pageblocks_stride * pageblock_nr_pages);
+}
+
+/* Checks if this range of memory is likely to be hot-removable. */
+int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
+{
+ int type;
+ struct page *page = pfn_to_page(start_pfn);
+ struct page *end_page = page + nr_pages;
+
+ /* Check the starting page of each pageblock within the range */
+ for (; page < end_page; page = next_active_pageblock(page)) {
+  type = get_pageblock_migratetype(page);
+
+  /*
+   * A pageblock containing MOVABLE or free pages is considered
+   * removable
+   */
+  if (type != MIGRATE_MOVABLE && !pageblock_free(page))
+   return 0;
+
+  /*
+   * A pageblock starting with a PageReserved page is not
+   * considered removable.
+   */
+  if (PageReserved(page))
+   return 0;
+ }
+
+ /* All pageblocks in the memory block are likely to be hot-removable */
+ return 1;
+}
+
+/*
  * Confirm all pages in a range [start, end) is belongs to the same zone.
  */
 static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
@@ -544,15 +627,12 @@ int scan_lru_pages(unsigned long start, unsigned long end)
 }
 
 static struct page *
-hotremove_migrate_alloc(struct page *page,
-  unsigned long private,
-  int **x)
+hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
 {
- /* This should be improoooooved!! */
- return alloc_page(GFP_HIGHUSER_PAGECACHE);
+ /* This should be improooooved!! */
+ return alloc_page(GFP_HIGHUSER_MOVABLE);
 }
 
-
 #define NR_OFFLINE_AT_ONCE_PAGES (256)
 static int
 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
@@ -574,8 +654,9 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
   * We can skip free pages. And we can only deal with pages on
   * LRU.
   */
-  ret = isolate_lru_page(page, &source);
+  ret = isolate_lru_page(page);
   if (!ret) { /* Success */
+   list_add_tail(&page->lru, &source);
    move_pages--;
   } else {
    /* Becasue we don't have big zone->lock. we should
@@ -766,10 +847,19 @@ failed_removal:
  return ret;
 }
+
+int remove_memory(u64 start, u64 size)
+{
+ unsigned long start_pfn, end_pfn;
+
+ start_pfn = PFN_DOWN(start);
+ end_pfn = start_pfn + PFN_DOWN(size);
+ return offline_pages(start_pfn, end_pfn, 120 * HZ);
+}
 #else
 int remove_memory(u64 start, u64 size)
 {
  return -EINVAL;
 }
-EXPORT_SYMBOL_GPL(remove_memory);
 #endif /* CONFIG_MEMORY_HOTREMOVE */
+EXPORT_SYMBOL_GPL(remove_memory);