X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fmemory_hotplug.c;h=be211a582930d87bba2f959e2729399e364f6ae9;hb=b809b9ca3e6b47561514ae9767177cc482e90fc6;hp=cba36ef0d5063beea3b8e7edb92d415683d66f27;hpb=04753278769f3b6c3b79a080edb52f21d83bf6e2;p=safe%2Fjmp%2Flinux-2.6

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index cba36ef..be211a5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -22,13 +22,18 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
+#include 
 
 #include 
 
+#include "internal.h"
+
 /* add this memory to iomem resource */
 static struct resource *register_memory_resource(u64 start, u64 size)
 {
@@ -60,20 +65,22 @@ static void release_memory_resource(struct resource *res)
 
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-static void get_page_bootmem(unsigned long info, struct page *page, int magic)
+static void get_page_bootmem(unsigned long info, struct page *page, int type)
 {
-        atomic_set(&page->_mapcount, magic);
+        atomic_set(&page->_mapcount, type);
         SetPagePrivate(page);
         set_page_private(page, info);
         atomic_inc(&page->_count);
 }
 
-void put_page_bootmem(struct page *page)
+/* reference to __meminit __free_pages_bootmem is valid
+ * so use __ref to tell modpost not to generate a warning */
+void __ref put_page_bootmem(struct page *page)
 {
-        int magic;
+        int type;
 
-        magic = atomic_read(&page->_mapcount);
-        BUG_ON(magic >= -1);
+        type = atomic_read(&page->_mapcount);
+        BUG_ON(type >= -1);
 
         if (atomic_dec_return(&page->_count) == 1) {
                 ClearPagePrivate(page);
@@ -84,7 +91,7 @@ void put_page_bootmem(struct page *page)
 
 }
 
-void register_page_bootmem_info_section(unsigned long start_pfn)
+static void register_page_bootmem_info_section(unsigned long start_pfn)
 {
         unsigned long *usemap, mapsize, section_nr, i;
         struct mem_section *ms;
@@ -117,7 +124,7 @@ void register_page_bootmem_info_section(unsigned long start_pfn)
         mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
 
         for (i = 0; i < mapsize; i++, page++)
-                get_page_bootmem(section_nr, page, MIX_INFO);
+                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
 
 }
 
@@ -157,27 +164,65 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
-static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
+static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
+                           unsigned long end_pfn)
+{
+        unsigned long old_zone_end_pfn;
+
+        zone_span_writelock(zone);
+
+        old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+        if (start_pfn < zone->zone_start_pfn)
+                zone->zone_start_pfn = start_pfn;
+
+        zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
+                                zone->zone_start_pfn;
+
+        zone_span_writeunlock(zone);
+}
+
+static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
+                            unsigned long end_pfn)
+{
+        unsigned long old_pgdat_end_pfn =
+                pgdat->node_start_pfn + pgdat->node_spanned_pages;
+
+        if (start_pfn < pgdat->node_start_pfn)
+                pgdat->node_start_pfn = start_pfn;
+
+        pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
+                                        pgdat->node_start_pfn;
+}
+
+static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
         struct pglist_data *pgdat = zone->zone_pgdat;
         int nr_pages = PAGES_PER_SECTION;
         int nid = pgdat->node_id;
         int zone_type;
+        unsigned long flags;
 
         zone_type = zone - pgdat->node_zones;
         if (!zone->wait_table) {
-                int ret = 0;
+                int ret;
+
                 ret = init_currently_empty_zone(zone, phys_start_pfn,
                                         nr_pages, MEMMAP_HOTPLUG);
-                if (ret < 0)
+                if (ret)
                         return ret;
         }
+        pgdat_resize_lock(zone->zone_pgdat, &flags);
+        grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
+        grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
+                        phys_start_pfn + nr_pages);
+        pgdat_resize_unlock(zone->zone_pgdat, &flags);
         memmap_init_zone(nr_pages, nid, zone_type,
                      phys_start_pfn, MEMMAP_HOTPLUG);
         return 0;
 }
 
-static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
+static int __meminit __add_section(int nid, struct zone *zone,
+                                        unsigned long phys_start_pfn)
 {
         int nr_pages = PAGES_PER_SECTION;
         int ret;
@@ -195,9 +240,19 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
         if (ret < 0)
                 return ret;
 
-        return register_new_memory(__pfn_to_section(phys_start_pfn));
+        return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
 }
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+        /*
+         * XXX: Freeing memmap with vmemmap is not implement yet.
+         *      This should be removed later.
+         */
+        return -EBUSY;
+}
+#else
 static int __remove_section(struct zone *zone, struct mem_section *ms)
 {
         unsigned long flags;
@@ -216,6 +271,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms)
         pgdat_resize_unlock(pgdat, &flags);
         return 0;
 }
+#endif
 
 /*
  * Reasonably generic function for adding memory.  It is
@@ -223,8 +279,8 @@ static int __remove_section(struct zone *zone, struct mem_section *ms)
  * call this function after deciding the zone to which to
  * add the new pages.
  */
-int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
-        unsigned long nr_pages)
+int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
+                unsigned long nr_pages)
 {
         unsigned long i;
         int err = 0;
@@ -234,7 +290,7 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
         end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
 
         for (i = start_sec; i <= end_sec; i++) {
-                err = __add_section(zone, i << PFN_SECTION_SHIFT);
+                err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);
 
                 /*
                  * EEXIST is finally dealt with by ioresource collision
@@ -273,11 +329,11 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
         BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
         BUG_ON(nr_pages % PAGES_PER_SECTION);
 
-        release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);
-
         sections_to_remove = nr_pages / PAGES_PER_SECTION;
         for (i = 0; i < sections_to_remove; i++) {
                 unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+                release_mem_region(pfn << PAGE_SHIFT,
+                                   PAGES_PER_SECTION << PAGE_SHIFT);
                 ret = __remove_section(zone, __pfn_to_section(pfn));
                 if (ret)
                         break;
@@ -286,40 +342,13 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
 }
 EXPORT_SYMBOL_GPL(__remove_pages);
 
-static void grow_zone_span(struct zone *zone,
-                unsigned long start_pfn, unsigned long end_pfn)
-{
-        unsigned long old_zone_end_pfn;
-
-        zone_span_writelock(zone);
-
-        old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-        if (start_pfn < zone->zone_start_pfn)
-                zone->zone_start_pfn = start_pfn;
-
-        zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
-                                zone->zone_start_pfn;
-
-        zone_span_writeunlock(zone);
-}
-
-static void grow_pgdat_span(struct pglist_data *pgdat,
-                unsigned long start_pfn, unsigned long end_pfn)
-{
-        unsigned long old_pgdat_end_pfn =
-                pgdat->node_start_pfn + pgdat->node_spanned_pages;
-
-        if (start_pfn < pgdat->node_start_pfn)
-                pgdat->node_start_pfn = start_pfn;
-
-        pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
-                                        pgdat->node_start_pfn;
-}
-
 void online_page(struct page *page)
 {
+        unsigned long pfn = page_to_pfn(page);
+
         totalram_pages++;
-        num_physpages++;
+        if (pfn >= num_physpages)
+                num_physpages = pfn + 1;
 
 #ifdef CONFIG_HIGHMEM
         if (PageHighMem(page))
@@ -354,7 +383,6 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 
 int online_pages(unsigned long pfn, unsigned long nr_pages)
 {
-        unsigned long flags;
         unsigned long onlined_pages = 0;
         struct zone *zone;
         int need_zonelists_rebuild = 0;
@@ -382,11 +410,6 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
          *  memory_block->state_mutex.
          */
         zone = page_zone(pfn_to_page(pfn));
-        pgdat_resize_lock(zone->zone_pgdat, &flags);
-        grow_zone_span(zone, pfn, pfn + nr_pages);
-        grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
-        pgdat_resize_unlock(zone->zone_pgdat, &flags);
-
         /*
          * If this zone is not populated, then it is not in zonelist.
          * This means the page allocator ignores this zone.
@@ -395,12 +418,21 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
         if (!populated_zone(zone))
                 need_zonelists_rebuild = 1;
 
-        walk_memory_resource(pfn, nr_pages, &onlined_pages,
+        ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
                 online_pages_range);
+        if (ret) {
+                printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
+                        nr_pages, pfn);
+                memory_notify(MEM_CANCEL_ONLINE, &arg);
+                return ret;
+        }
+
         zone->present_pages += onlined_pages;
         zone->zone_pgdat->node_present_pages += onlined_pages;
 
-        setup_per_zone_pages_min();
+        zone_pcp_update(zone);
+        setup_per_zone_wmarks();
+        calculate_zone_inactive_ratio(zone);
         if (onlined_pages) {
                 kswapd_run(zone_to_nid(zone));
                 node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
@@ -408,7 +440,9 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 
         if (need_zonelists_rebuild)
                 build_all_zonelists();
-        vm_total_pages = nr_free_pagecache_pages();
+        else
+                vm_total_pages = nr_free_pagecache_pages();
+
         writeback_set_ratelimit();
 
         if (onlined_pages)
@@ -418,7 +452,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 
-static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
+/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 {
         struct pglist_data *pgdat;
         unsigned long zones_size[MAX_NR_ZONES] = {0};
@@ -434,7 +469,7 @@ static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
         /* we can use NODE_DATA(nid) from here */
 
         /* init node's zones as empty zones, we don't have any present pages.*/
-        free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);
+        free_area_init_node(nid, zones_size, start_pfn, zholes_size);
 
         return pgdat;
 }
@@ -447,21 +482,26 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 }
 
 
-int add_memory(int nid, u64 start, u64 size)
+/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+int __ref add_memory(int nid, u64 start, u64 size)
 {
         pg_data_t *pgdat = NULL;
         int new_pgdat = 0;
         struct resource *res;
         int ret;
 
+        lock_system_sleep();
+
         res = register_memory_resource(start, size);
+        ret = -EEXIST;
         if (!res)
-                return -EEXIST;
+                goto out;
 
         if (!node_online(nid)) {
                 pgdat = hotadd_new_pgdat(nid, start);
+                ret = -ENOMEM;
                 if (!pgdat)
-                        return -ENOMEM;
+                        goto out;
                 new_pgdat = 1;
         }
 
@@ -474,8 +514,6 @@ int add_memory(int nid, u64 start, u64 size)
         /* we online node here. we can't roll back from here. */
         node_set_online(nid);
 
-        cpuset_track_online_nodes();
-
         if (new_pgdat) {
                 ret = register_one_node(nid);
                 /*
@@ -486,7 +524,11 @@ int add_memory(int nid, u64 start, u64 size)
                 BUG_ON(ret);
         }
 
-        return ret;
+        /* create new memmap entry */
+        firmware_map_add_hotplug(start, start + size, "System RAM");
+
+        goto out;
+
 error:
         /* rollback pgdat allocation and others */
         if (new_pgdat)
@@ -494,12 +536,74 @@ error:
         if (res)
                 release_memory_resource(res);
 
+out:
+        unlock_system_sleep();
         return ret;
 }
 EXPORT_SYMBOL_GPL(add_memory);
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
+ * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
+ * set and the size of the free page is given by page_order(). Using this,
+ * the function determines if the pageblock contains only free pages.
+ * Due to buddy contraints, a free page at least the size of a pageblock will
+ * be located at the start of the pageblock
+ */
+static inline int pageblock_free(struct page *page)
+{
+        return PageBuddy(page) && page_order(page) >= pageblock_order;
+}
+
+/* Return the start of the next active pageblock after a given page */
+static struct page *next_active_pageblock(struct page *page)
+{
+        int pageblocks_stride;
+
+        /* Ensure the starting page is pageblock-aligned */
+        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+
+        /* Move forward by at least 1 * pageblock_nr_pages */
+        pageblocks_stride = 1;
+
+        /* If the entire pageblock is free, move to the end of free page */
+        if (pageblock_free(page))
+                pageblocks_stride += page_order(page) - pageblock_order;
+
+        return page + (pageblocks_stride * pageblock_nr_pages);
+}
+
+/* Checks if this range of memory is likely to be hot-removable. */
+int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
+{
+        int type;
+        struct page *page = pfn_to_page(start_pfn);
+        struct page *end_page = page + nr_pages;
+
+        /* Check the starting page of each pageblock within the range */
+        for (; page < end_page; page = next_active_pageblock(page)) {
+                type = get_pageblock_migratetype(page);
+
+                /*
+                 * A pageblock containing MOVABLE or free pages is considered
+                 * removable
+                 */
+                if (type != MIGRATE_MOVABLE && !pageblock_free(page))
+                        return 0;
+
+                /*
+                 * A pageblock starting with a PageReserved page is not
+                 * considered removable.
+                 */
+                if (PageReserved(page))
+                        return 0;
+        }
+
+        /* All pageblocks in the memory block are likely to be hot-removable */
+        return 1;
+}
+
+/*
  * Confirm all pages in a range [start, end) is belongs to the same zone.
  */
 static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
@@ -544,15 +648,12 @@ int scan_lru_pages(unsigned long start, unsigned long end)
 }
 
 static struct page *
-hotremove_migrate_alloc(struct page *page,
-                        unsigned long private,
-                        int **x)
+hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
 {
-        /* This should be improoooooved!! */
-        return alloc_page(GFP_HIGHUSER_PAGECACHE);
+        /* This should be improooooved!! */
+        return alloc_page(GFP_HIGHUSER_MOVABLE);
 }
 
-
 #define NR_OFFLINE_AT_ONCE_PAGES        (256)
 static int
 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
@@ -574,18 +675,22 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                  * We can skip free pages. And we can only deal with pages on
                  * LRU.
                  */
-                ret = isolate_lru_page(page, &source);
+                ret = isolate_lru_page(page);
                 if (!ret) { /* Success */
+                        list_add_tail(&page->lru, &source);
                         move_pages--;
+                        inc_zone_page_state(page, NR_ISOLATED_ANON +
+                                            page_is_file_cache(page));
+
                 } else {
                         /* Becasue we don't have big zone->lock. we should
                            check this again here. */
                         if (page_count(page))
                                 not_managed++;
 #ifdef CONFIG_DEBUG_VM
-                        printk(KERN_INFO "removing from LRU failed"
-                                         " %lx/%d/%lx\n",
-                                pfn, page_count(page), page->flags);
+                        printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
+                               pfn);
+                        dump_page(page);
 #endif
                 }
         }
@@ -599,7 +704,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
         if (list_empty(&source))
                 goto out;
         /* this function returns # of failed pages */
-        ret = migrate_pages(&source, hotremove_migrate_alloc, 0);
+        ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
 
 out:
         return ret;
@@ -619,7 +724,7 @@ offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
 static void
 offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
-        walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL,
+        walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
                                 offline_isolated_pages_cb);
 }
 
@@ -645,14 +750,14 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
         long offlined = 0;
         int ret;
 
-        ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined,
+        ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
                         check_pages_isolated_cb);
         if (ret < 0)
                 offlined = (long)ret;
         return offlined;
 }
 
-int offline_pages(unsigned long start_pfn,
+static int offline_pages(unsigned long start_pfn,
                 unsigned long end_pfn, unsigned long timeout)
 {
         unsigned long pfn, nr_pages, expire;
@@ -672,6 +777,8 @@ int offline_pages(unsigned long start_pfn,
         if (!test_pages_in_a_zone(start_pfn, end_pfn))
                 return -EINVAL;
 
+        lock_system_sleep();
+
         zone = page_zone(pfn_to_page(start_pfn));
         node = zone_to_nid(zone);
         nr_pages = end_pfn - start_pfn;
@@ -679,7 +786,7 @@ int offline_pages(unsigned long start_pfn,
         /* set above range as isolated */
         ret = start_isolate_page_range(start_pfn, end_pfn);
         if (ret)
-                return ret;
+                goto out;
 
         arg.start_pfn = start_pfn;
         arg.nr_pages = nr_pages;
@@ -749,12 +856,19 @@ repeat:
         zone->present_pages -= offlined_pages;
         zone->zone_pgdat->node_present_pages -= offlined_pages;
         totalram_pages -= offlined_pages;
-        num_physpages -= offlined_pages;
+
+        setup_per_zone_wmarks();
+        calculate_zone_inactive_ratio(zone);
+        if (!node_present_pages(node)) {
+                node_clear_state(node, N_HIGH_MEMORY);
+                kswapd_stop(node);
+        }
 
         vm_total_pages = nr_free_pagecache_pages();
         writeback_set_ratelimit();
 
         memory_notify(MEM_OFFLINE, &arg);
+        unlock_system_sleep();
         return 0;
 
 failed_removal:
@@ -764,12 +878,23 @@ failed_removal:
         /* pushback to free area */
         undo_isolate_page_range(start_pfn, end_pfn);
 
+out:
+        unlock_system_sleep();
         return ret;
 }
+
+int remove_memory(u64 start, u64 size)
+{
+        unsigned long start_pfn, end_pfn;
+
+        start_pfn = PFN_DOWN(start);
+        end_pfn = start_pfn + PFN_DOWN(size);
+        return offline_pages(start_pfn, end_pfn, 120 * HZ);
+}
 #else
 int remove_memory(u64 start, u64 size)
 {
         return -EINVAL;
 }
-EXPORT_SYMBOL_GPL(remove_memory);
 #endif /* CONFIG_MEMORY_HOTREMOVE */
+EXPORT_SYMBOL_GPL(remove_memory);