X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fsparse.c;h=36511c7b5e2c797067ccb539646f580aa8ec1012;hb=cdd16d0265c9234228fd37fbbad844d7e894b278;hp=b3c82ba300124ea28d1cb952f337e387b1c4b9eb;hpb=f2d0aa5bf8d4f7ae4cb1a7feebf5b1afddd0b9b0;p=safe%2Fjmp%2Flinux-2.6

diff --git a/mm/sparse.c b/mm/sparse.c
index b3c82ba..36511c7 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -8,7 +8,10 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include "internal.h"
 #include <asm/dma.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
 
 /*
  * Permanent SPARSEMEM data:
@@ -24,8 +27,36 @@ struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
 #endif
 EXPORT_SYMBOL(mem_section);
 
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+/*
+ * If we did not store the node number in the page then we have to
+ * do a lookup in the section_to_node_table in order to find which
+ * node the page belongs to.
+ */
+#if MAX_NUMNODES <= 256
+static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
+#else
+static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
+#endif
+
+int page_to_nid(struct page *page)
+{
+	return section_to_node_table[page_to_section(page)];
+}
+EXPORT_SYMBOL(page_to_nid);
+
+static void set_section_nid(unsigned long section_nr, int nid)
+{
+	section_to_node_table[section_nr] = nid;
+}
+#else /* !NODE_NOT_IN_PAGE_FLAGS */
+static inline void set_section_nid(unsigned long section_nr, int nid)
+{
+}
+#endif
+
 #ifdef CONFIG_SPARSEMEM_EXTREME
-static struct mem_section *sparse_index_alloc(int nid)
+static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
 {
 	struct mem_section *section = NULL;
 	unsigned long array_size = SECTIONS_PER_ROOT *
@@ -42,7 +73,7 @@ static struct mem_section *sparse_index_alloc(int nid)
 	return section;
 }
 
-static int sparse_index_init(unsigned long section_nr, int nid)
+static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 {
 	static DEFINE_SPINLOCK(index_init_lock);
 	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
@@ -53,6 +84,8 @@ static int sparse_index_init(unsigned long section_nr, int nid)
 		return -EEXIST;
 
 	section = sparse_index_alloc(nid);
+	if (!section)
+		return -ENOMEM;
 	/*
 	 * This lock keeps two different sections from
 	 * reallocating for the same index
@@ -78,7 +111,7 @@ static inline int sparse_index_init(unsigned long section_nr, int nid)
 
 /*
  * Although written for the SPARSEMEM_EXTREME case, this happens
- * to also work for the flat array case becase
+ * to also work for the flat array case because
  * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
  */
 int __section_nr(struct mem_section* ms)
@@ -115,16 +148,27 @@ static inline int sparse_early_nid(struct mem_section *section)
 }
 
 /* Record a memory area against a node. */
-void memory_present(int nid, unsigned long start, unsigned long end)
+void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
+	unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
 	unsigned long pfn;
 
+	/*
+	 * Sanity checks - do not allow an architecture to pass
+	 * in larger pfns than the maximum scope of sparsemem:
+	 */
+	if (start >= max_arch_pfn)
+		return;
+	if (end >= max_arch_pfn)
+		end = max_arch_pfn;
+
 	start &= PAGE_SECTION_MASK;
 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
 		unsigned long section = pfn_to_section_nr(pfn);
 		struct mem_section *ms;
 
 		sparse_index_init(section, nid);
+		set_section_nid(section, nid);
 
 		ms = __nr_to_section(section);
 		if (!ms->section_mem_map)
@@ -147,7 +191,7 @@ unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
 		if (nid != early_pfn_to_nid(pfn))
 			continue;
 
-		if (pfn_valid(pfn))
+		if (pfn_present(pfn))
 			nr_pages += PAGES_PER_SECTION;
 	}
 
@@ -165,47 +209,167 @@ static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long p
 }
 
 /*
- * We need this if we ever free the mem_maps. While not implemented yet,
- * this function is included for parity with its sibling.
+ * Decode mem_map from the coded memmap
  */
-static __attribute((unused))
 struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
 {
+	/* mask off the extra low bits of information */
+	coded_mem_map &= SECTION_MAP_MASK;
 	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
 }
 
-static int sparse_init_one_section(struct mem_section *ms,
-		unsigned long pnum, struct page *mem_map)
+static int __meminit sparse_init_one_section(struct mem_section *ms,
+		unsigned long pnum, struct page *mem_map,
+		unsigned long *pageblock_bitmap)
 {
-	if (!valid_section(ms))
+	if (!present_section(ms))
 		return -EINVAL;
 
 	ms->section_mem_map &= ~SECTION_MAP_MASK;
-	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);
+	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
+							SECTION_HAS_MEM_MAP;
+	ms->pageblock_flags = pageblock_bitmap;
 
 	return 1;
 }
 
-static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
+unsigned long usemap_size(void)
 {
-	struct page *map;
+	unsigned long size_bytes;
+	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
+	size_bytes = roundup(size_bytes, sizeof(unsigned long));
+	return size_bytes;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static unsigned long *__kmalloc_section_usemap(void)
+{
+	return kmalloc(usemap_size(), GFP_KERNEL);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
+{
+	unsigned long *usemap;
 	struct mem_section *ms = __nr_to_section(pnum);
 	int nid = sparse_early_nid(ms);
 
+	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
+	if (usemap)
+		return usemap;
+
+	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
+	nid = 0;
+
+	printk(KERN_WARNING "%s: allocation failed\n", __func__);
+	return NULL;
+}
+
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
+{
+	struct page *map;
+
 	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
 	if (map)
 		return map;
 
-	map = alloc_bootmem_node(NODE_DATA(nid),
-			sizeof(struct page) * PAGES_PER_SECTION);
+	map = alloc_bootmem_pages_node(NODE_DATA(nid),
+		       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
+	return map;
+}
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
+
+struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
+{
+	struct page *map;
+	struct mem_section *ms = __nr_to_section(pnum);
+	int nid = sparse_early_nid(ms);
+
+	map = sparse_mem_map_populate(pnum, nid);
 	if (map)
 		return map;
 
-	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
+	printk(KERN_ERR "%s: sparsemem memory map backing failed "
+			"some memory will not be available.\n", __func__);
 	ms->section_mem_map = 0;
 	return NULL;
 }
 
+void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
+{
+}
+
+/*
+ * Allocate the accumulated non-linear sections, allocate a mem_map
+ * for each and record the physical to section mapping.
+ */
+void __init sparse_init(void)
+{
+	unsigned long pnum;
+	struct page *map;
+	unsigned long *usemap;
+	unsigned long **usemap_map;
+	int size;
+
+	/*
+	 * map is using big page (aka 2M in x86 64 bit)
+	 * usemap is less one page (aka 24 bytes)
+	 * so alloc 2M (with 2M align) and 24 bytes in turn will
+	 * make next 2M slip to one more 2M later.
+	 * then in big system, the memory will have a lot of holes...
+	 * here try to allocate 2M pages continuously.
+	 *
+	 * powerpc need to call sparse_init_one_section right after each
+	 * sparse_early_mem_map_alloc, so allocate usemap_map at first.
+	 */
+	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
+	usemap_map = alloc_bootmem(size);
+	if (!usemap_map)
+		panic("can not allocate usemap_map\n");
+
+	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+		if (!present_section_nr(pnum))
+			continue;
+		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
+	}
+
+	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+		if (!present_section_nr(pnum))
+			continue;
+
+		usemap = usemap_map[pnum];
+		if (!usemap)
+			continue;
+
+		map = sparse_early_mem_map_alloc(pnum);
+		if (!map)
+			continue;
+
+		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
+								usemap);
+	}
+
+	vmemmap_populate_print_last();
+
+	free_bootmem(__pa(usemap_map), size);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+						 unsigned long nr_pages)
+{
+	/* This will make the necessary allocations eventually. */
+	return sparse_mem_map_populate(pnum, nid);
+}
+static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+{
+	return; /* XXX: Not implemented yet */
+}
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+}
+#else
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
 	struct page *page, *ret;
@@ -228,40 +392,80 @@ got_map_ptr:
 	return ret;
 }
 
-static int vaddr_in_vmalloc_area(void *addr)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+						  unsigned long nr_pages)
 {
-	if (addr >= (void *)VMALLOC_START &&
-	    addr < (void *)VMALLOC_END)
-		return 1;
-	return 0;
+	return __kmalloc_section_memmap(nr_pages);
 }
 
 static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
-	if (vaddr_in_vmalloc_area(memmap))
+	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
 	else
 		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
 }
 
-/*
- * Allocate the accumulated non-linear sections, allocate a mem_map
- * for each and record the physical to section mapping.
- */
-void sparse_init(void)
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
 {
-	unsigned long pnum;
-	struct page *map;
+	unsigned long maps_section_nr, removing_section_nr, i;
+	int magic;
+
+	for (i = 0; i < nr_pages; i++, page++) {
+		magic = atomic_read(&page->_mapcount);
+
+		BUG_ON(magic == NODE_INFO);
+
+		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
+		removing_section_nr = page->private;
+
+		/*
+		 * When this function is called, the removing section is
+		 * logical offlined state. This means all pages are isolated
+		 * from page allocator. If removing section's memmap is placed
+		 * on the same section, it must not be freed.
+		 * If it is freed, page allocator may allocate it which will
+		 * be removed physically soon.
+		 */
+		if (maps_section_nr != removing_section_nr)
+			put_page_bootmem(page);
+	}
+}
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
-		if (!valid_section_nr(pnum))
-			continue;
+static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+{
+	struct page *usemap_page;
+	unsigned long nr_pages;
 
-		map = sparse_early_mem_map_alloc(pnum);
-		if (!map)
-			continue;
-		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
+	if (!usemap)
+		return;
+
+	usemap_page = virt_to_page(usemap);
+	/*
+	 * Check to see if allocation came from hot-plug-add
+	 */
+	if (PageSlab(usemap_page)) {
+		kfree(usemap);
+		if (memmap)
+			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
+		return;
+	}
+
+	/*
+	 * The usemap came from bootmem. This is packed with other usemaps
+	 * on the section which has pgdat at boot time. Just keep it as is now.
+	 */
+
+	if (memmap) {
+		struct page *memmap_page;
+		memmap_page = virt_to_page(memmap);
+
+		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+			>> PAGE_SHIFT;
+
+		free_map_bootmem(memmap_page, nr_pages);
 	}
 }
 
@@ -277,6 +481,7 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	struct mem_section *ms;
 	struct page *memmap;
+	unsigned long *usemap;
 	unsigned long flags;
 	int ret;
 
@@ -284,8 +489,17 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 	 * no locking for this, because it does its own
 	 * plus, it does a kmalloc
 	 */
-	sparse_index_init(section_nr, pgdat->node_id);
-	memmap = __kmalloc_section_memmap(nr_pages);
+	ret = sparse_index_init(section_nr, pgdat->node_id);
+	if (ret < 0 && ret != -EEXIST)
+		return ret;
+	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
+	if (!memmap)
+		return -ENOMEM;
+	usemap = __kmalloc_section_usemap();
+	if (!usemap) {
+		__kfree_section_memmap(memmap, nr_pages);
+		return -ENOMEM;
+	}
 
 	pgdat_resize_lock(pgdat, &flags);
 
@@ -294,13 +508,33 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 		ret = -EEXIST;
 		goto out;
 	}
+
 	ms->section_mem_map |= SECTION_MARKED_PRESENT;
 
-	ret = sparse_init_one_section(ms, section_nr, memmap);
+	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);
 
 out:
 	pgdat_resize_unlock(pgdat, &flags);
-	if (ret <= 0)
+	if (ret <= 0) {
+		kfree(usemap);
 		__kfree_section_memmap(memmap, nr_pages);
+	}
 	return ret;
 }
+
+void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
+{
+	struct page *memmap = NULL;
+	unsigned long *usemap = NULL;
+
+	if (ms->section_mem_map) {
+		usemap = ms->pageblock_flags;
+		memmap = sparse_decode_mem_map(ms->section_mem_map,
+						__section_nr(ms));
+		ms->section_mem_map = 0;
+		ms->pageblock_flags = NULL;
+	}
+
+	free_section_usemap(memmap, usemap);
+}
+#endif
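
Note (not part of the patch): several hunks above rely on the section_mem_map encoding implemented by sparse_encode_mem_map() and sparse_decode_mem_map(). The memmap pointer is stored pre-biased by the section's first pfn, so pfn_to_page() reduces to a single addition, and the low bits of the same word double as the SECTION_MARKED_PRESENT and SECTION_HAS_MEM_MAP flags because struct page pointers are at least word aligned. The standalone C sketch below only models that arithmetic; the constants, the struct page stand-in and the helper names are invented for illustration and are not the kernel's definitions.

/* model_section_mem_map.c: userspace model of the section_mem_map encoding.
 * All values here are assumptions made for the sketch, not kernel values. */
#include <assert.h>
#include <stdio.h>

#define PFN_SECTION_SHIFT	15	/* assume 1 << 15 pages per section */
#define SECTION_MARKED_PRESENT	(1UL << 0)
#define SECTION_HAS_MEM_MAP	(1UL << 1)
#define SECTION_MAP_LAST_BIT	(1UL << 2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT - 1))

struct page { unsigned long flags; void *pad[7]; };	/* aligned stand-in */

static unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

/* Store "mem_map minus the section's first pfn" as an address; the low
 * bits stay zero (pointer alignment), so they are free for the flags. */
static unsigned long encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)mem_map -
	       section_nr_to_pfn(pnum) * sizeof(struct page);
}

static struct page *decode_mem_map(unsigned long coded, unsigned long pnum)
{
	coded &= SECTION_MAP_MASK;	/* mask off the extra low bits */
	return (struct page *)(coded +
	       section_nr_to_pfn(pnum) * sizeof(struct page));
}

int main(void)
{
	static struct page fake_map[8];	/* pretend memmap for section 3 */
	unsigned long pnum = 3;
	unsigned long pfn = section_nr_to_pfn(pnum) + 5;
	unsigned long coded;

	coded = encode_mem_map(fake_map, pnum) |
		SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;

	/* pfn_to_page(pfn) is just "coded base + pfn", whatever the section */
	assert((struct page *)((coded & SECTION_MAP_MASK) +
			       pfn * sizeof(struct page)) == &fake_map[5]);
	assert(decode_mem_map(coded, pnum) == fake_map);
	printf("encode/decode round trip ok\n");
	return 0;
}

The same bias trick is why sparse_init_one_section() can OR SECTION_HAS_MEM_MAP into the freshly encoded value, and why sparse_decode_mem_map() must mask with SECTION_MAP_MASK before adding the section offset back.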