X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=mm%2Fsparse.c;h=98d6b39c34722c1734bee587a612ce0f74d7fbfa;hb=5d7d03b81af05f3c291b5c6be621a2b53d187e09;hp=1f4dbb867b8af7a04040cb3c8c4195d2bab7e4ed;hpb=5c0e3066474b57c56ff0d88ca31d95bd14232fee;p=safe%2Fjmp%2Flinux-2.6

diff --git a/mm/sparse.c b/mm/sparse.c
index 1f4dbb8..98d6b39 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -83,6 +83,8 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 		return -EEXIST;
 
 	section = sparse_index_alloc(nid);
+	if (!section)
+		return -ENOMEM;
 	/*
 	 * This lock keeps two different sections from
 	 * reallocating for the same index
@@ -147,8 +149,18 @@ static inline int sparse_early_nid(struct mem_section *section)
 /* Record a memory area against a node. */
 void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
+	unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
 	unsigned long pfn;
 
+	/*
+	 * Sanity checks - do not allow an architecture to pass
+	 * in larger pfns than the maximum scope of sparsemem:
+	 */
+	if (start >= max_arch_pfn)
+		return;
+	if (end >= max_arch_pfn)
+		end = max_arch_pfn;
+
 	start &= PAGE_SECTION_MASK;
 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
 		unsigned long section = pfn_to_section_nr(pfn);
@@ -220,12 +232,6 @@ static int __meminit sparse_init_one_section(struct mem_section *ms,
 	return 1;
 }
 
-__attribute__((weak)) __init
-void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
-{
-	return NULL;
-}
-
 static unsigned long usemap_size(void)
 {
 	unsigned long size_bytes;
@@ -241,7 +247,7 @@ static unsigned long *__kmalloc_section_usemap(void)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-static unsigned long *sparse_early_usemap_alloc(unsigned long pnum)
+static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
 {
 	unsigned long *usemap;
 	struct mem_section *ms = __nr_to_section(pnum);
@@ -259,7 +265,7 @@ static unsigned long *sparse_early_usemap_alloc(unsigned long pnum)
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid)
+struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 {
 	struct page *map;
 
@@ -267,11 +273,6 @@ struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid)
 	if (map)
 		return map;
 
-	map = alloc_bootmem_high_node(NODE_DATA(nid),
-			sizeof(struct page) * PAGES_PER_SECTION);
-	if (map)
-		return map;
-
 	map = alloc_bootmem_node(NODE_DATA(nid),
 			sizeof(struct page) * PAGES_PER_SECTION);
 	return map;
@@ -284,7 +285,7 @@ struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	struct mem_section *ms = __nr_to_section(pnum);
 	int nid = sparse_early_nid(ms);
 
-	map = sparse_early_mem_map_populate(pnum, nid);
+	map = sparse_mem_map_populate(pnum, nid);
 	if (map)
 		return map;
 
@@ -322,6 +323,18 @@ void __init sparse_init(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+						 unsigned long nr_pages)
+{
+	/* This will make the necessary allocations eventually. */
+	return sparse_mem_map_populate(pnum, nid);
+}
+static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+{
+	return; /* XXX: Not implemented yet */
+}
+#else
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
 	struct page *page, *ret;
@@ -344,22 +357,21 @@ got_map_ptr:
 	return ret;
 }
 
-static int vaddr_in_vmalloc_area(void *addr)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+						 unsigned long nr_pages)
 {
-	if (addr >= (void *)VMALLOC_START &&
-	    addr < (void *)VMALLOC_END)
-		return 1;
-	return 0;
+	return __kmalloc_section_memmap(nr_pages);
 }
 
 static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
-	if (vaddr_in_vmalloc_area(memmap))
+	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
 	else
 		free_pages((unsigned long)memmap,
 			   get_order(sizeof(struct page) * nr_pages));
 }
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 /*
  * returns the number of sections whose mem_maps were properly
@@ -381,9 +393,17 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 	 * no locking for this, because it does its own
 	 * plus, it does a kmalloc
 	 */
-	sparse_index_init(section_nr, pgdat->node_id);
-	memmap = __kmalloc_section_memmap(nr_pages);
+	ret = sparse_index_init(section_nr, pgdat->node_id);
+	if (ret < 0 && ret != -EEXIST)
+		return ret;
+	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
+	if (!memmap)
+		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
+	if (!usemap) {
+		__kfree_section_memmap(memmap, nr_pages);
+		return -ENOMEM;
+	}
 
 	pgdat_resize_lock(pgdat, &flags);
 
@@ -393,18 +413,16 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 		goto out;
 	}
 
-	if (!usemap) {
-		ret = -ENOMEM;
-		goto out;
-	}
 	ms->section_mem_map |= SECTION_MARKED_PRESENT;
 
 	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);
 
 out:
 	pgdat_resize_unlock(pgdat, &flags);
-	if (ret <= 0)
+	if (ret <= 0) {
+		kfree(usemap);
 		__kfree_section_memmap(memmap, nr_pages);
+	}
 	return ret;
 }
 #endif