/*
 * mm/sparse.c - sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section       - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

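/*
 * Layout note: with SPARSEMEM_EXTREME, mem_section[] is an array of
 * pointers and each root is allocated on demand (see
 * sparse_index_alloc() below), so a machine with a sparsely populated
 * physical address space only pays for the roots it actually uses.
 * The numbers are configuration dependent, but as an illustration:
 * with 4K pages and a 16-byte struct mem_section, one root covers
 * SECTIONS_PER_ROOT == 4096 / 16 == 256 sections.
 */
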
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available()) {
                if (node_state(nid, N_HIGH_MEMORY))
                        section = kmalloc_node(array_size, GFP_KERNEL, nid);
                else
                        section = kmalloc(array_size, GFP_KERNEL);
        } else
                section = alloc_bootmem_node(NODE_DATA(nid), array_size);

        if (section)
                memset(section, 0, array_size);

        return section;
}

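/*
 * Note the allocate-then-recheck pattern in sparse_index_init() below:
 * the root array is allocated before taking the spinlock (a GFP_KERNEL
 * kmalloc may sleep), then mem_section[root] is tested again under the
 * lock so that a racing initialization is reported as -EEXIST rather
 * than being installed twice.
 */
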
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        static DEFINE_SPINLOCK(index_init_lock);
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
        int ret = 0;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;
        /*
         * This lock keeps two different sections from
         * reallocating for the same index
         */
        spin_lock(&index_init_lock);

        if (mem_section[root]) {
                ret = -EEXIST;
                goto out;
        }

        mem_section[root] = section;
out:
        spin_unlock(&index_init_lock);
        return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
        unsigned long root_nr;
        struct mem_section* root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

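/*
 * __section_nr() above has no back-pointer to rely on, so it does a
 * linear scan over all roots; it is suited to rare paths such as
 * section removal rather than hot lookups.
 */
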
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}

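/*
 * SECTION_NID_SHIFT in the helpers above exists because the low bits
 * of section_mem_map are reserved for flags (SECTION_MARKED_PRESENT,
 * SECTION_HAS_MEM_MAP); the node id is shifted clear of them so both
 * can share the field until the real mem_map pointer replaces the nid.
 */
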
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                                unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_MARKED_PRESENT;
        }
}

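/*
 * memory_present() is invoked from architecture code early in boot,
 * once per physical memory range and before sparse_init() runs;
 * afterwards every present section carries its node id and the
 * SECTION_MARKED_PRESENT flag.
 */
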
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                                     unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the section's start pfn into the stored mem_map
 * pointer such that "page - section_mem_map" yields that page's
 * actual physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

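/*
 * The encoding above makes pfn recovery a single subtraction: the
 * stored value is mem_map minus the section's first pfn, so for any
 * page in that section, "page - stored_value" is exactly its pfn.
 * sparse_decode_mem_map() below adds section_nr_to_pfn(pnum) back
 * to recover the true mem_map pointer.
 */
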
/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}

unsigned long usemap_size(void)
{
        unsigned long size_bytes;
        size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
        size_bytes = roundup(size_bytes, sizeof(unsigned long));
        return size_bytes;
}

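/*
 * Worked example for usemap_size() (illustrative; the values are
 * configuration dependent): with 128MB sections, 4K pages and
 * pageblock_order 9, a section has 64 pageblocks; at 3 pageblock
 * bits each that is 192 bits, which rounds up to 24 bytes per
 * section.
 */
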
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long count)
{
        unsigned long section_nr;

        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        return alloc_bootmem_section(usemap_size() * count, section_nr);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
        static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                printk(KERN_INFO
                       "node %d must be removed before removing section %ld\n",
                       nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow un-removable sections because they will just
         * gather other removable sections for dynamic partitioning.
         * Just report such un-removable sections' numbers here.
         */
        printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
               pgdat_snr, nid);
        printk(KERN_CONT
               " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long count)
{
        return NULL;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long usemap_count, int nodeid)
{
        void *usemap;
        unsigned long pnum;
        int size = usemap_size();

        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                                 usemap_count);
        if (usemap) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        usemap_map[pnum] = usemap;
                        usemap += size;
                }
                return;
        }

        usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
        if (usemap) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        usemap_map[pnum] = usemap;
                        usemap += size;
                        check_usemap_section_nr(nodeid, usemap_map[pnum]);
                }
                return;
        }

        printk(KERN_WARNING "%s: allocation failed\n", __func__);
}

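/*
 * Note the fallback order above: usemaps are preferably packed into
 * the same section as the node's pgdat (so that section removal does
 * not trip over a stray usemap page), and only if that fails do they
 * come from generic node bootmem, in which case
 * check_usemap_section_nr() reports the resulting dependency.
 */
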
#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        map = alloc_bootmem_pages_node(NODE_DATA(nid),
                       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
        return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_begin,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
{
        void *map;
        unsigned long pnum;
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

        map = alloc_remap(nodeid, size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        size = PAGE_ALIGN(size);
        map = alloc_bootmem_pages_node(NODE_DATA(nodeid), size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        /* fallback */
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
                printk(KERN_ERR "%s: sparsemem memory map backing failed; "
                        "some memory will not be available.\n", __func__);
                ms->section_mem_map = 0;
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long map_count, int nodeid)
{
        sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                                         map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;

        printk(KERN_ERR "%s: sparsemem memory map backing failed; "
                        "some memory will not be available.\n", __func__);
        ms->section_mem_map = 0;
        return NULL;
}
#endif

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size;
        int nodeid_begin = 0;
        unsigned long pnum_begin = 0;
        unsigned long usemap_count;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        unsigned long map_count;
        int size2;
        struct page **map_map;
#endif

        /*
         * Each mem_map is one big page (2M on 64-bit x86) while each
         * usemap is tiny (about 24 bytes).  Interleaving one 2M-aligned
         * mem_map allocation with a 24-byte usemap allocation pushes
         * every following mem_map to the next 2M boundary, so on a big
         * system the memory ends up full of holes.  Instead, try to
         * allocate the 2M pages contiguously by batching per node.
         *
         * powerpc needs to call sparse_init_one_section() right after
         * each sparse_early_mem_map_alloc(), so allocate usemap_map first.
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
        usemap_map = alloc_bootmem(size);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        usemap_count = 1;
        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;
                int nodeid;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        usemap_count++;
                        continue;
                }
                /* ok, we need to take care of sections from pnum_begin to pnum - 1 */
                sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
                                                 usemap_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                usemap_count = 1;
        }
        /* ok, last chunk */
        sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
                                         usemap_count, nodeid_begin);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
        map_map = alloc_bootmem(size2);
        if (!map_map)
                panic("can not allocate map_map\n");

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        map_count = 1;
        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;
                int nodeid;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        map_count++;
                        continue;
                }
                /* ok, we need to take care of sections from pnum_begin to pnum - 1 */
                sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
                                                 map_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                map_count = 1;
        }
        /* ok, last chunk */
        sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
                                         map_count, nodeid_begin);
#endif

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;

                usemap = usemap_map[pnum];
                if (!usemap)
                        continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
                map = map_map[pnum];
#else
                map = sparse_early_mem_map_alloc(pnum);
#endif
                if (!map)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                                                usemap);
        }

        vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        free_bootmem(__pa(map_map), size2);
#endif
        free_bootmem(__pa(usemap_map), size);
}

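/*
 * To summarize sparse_init() above: pass one batches usemap
 * allocations per node, pass two (only with
 * CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER) batches mem_map
 * allocations per node, and the final pass wires every present
 * section to its mem_map and usemap via sparse_init_one_section().
 */
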
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                 unsigned long nr_pages)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * nr_pages;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
        memset(ret, 0, memmap_size);

        return ret;
}

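/*
 * __kmalloc_section_memmap() above prefers physically contiguous
 * pages for a hot-added section's memmap and falls back to vmalloc()
 * when the high-order allocation fails; __kfree_section_memmap()
 * below tells the two cases apart with is_vmalloc_addr().
 */
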
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                  unsigned long nr_pages)
{
        return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        int magic;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = atomic_read(&page->_mapcount);

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page->private;

                /*
                 * When this function is called, the section being removed
                 * has already been logically offlined, so all of its pages
                 * are isolated from the page allocator.  If that section's
                 * memmap is placed on the section itself, it must not be
                 * freed: the page allocator could hand it out again right
                 * before the memory is physically removed.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
        struct page *usemap_page;
        unsigned long nr_pages;

        if (!usemap)
                return;

        usemap_page = virt_to_page(usemap);
        /*
         * Check to see if the allocation came from hot-plug-add
         */
        if (PageSlab(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap, PAGES_PER_SECTION);
                return;
        }

        /*
         * The usemap came from bootmem. This is packed with other usemaps
         * on the section which has pgdat at boot time. Just keep it as is now.
         */

        if (memmap) {
                struct page *memmap_page;
                memmap_page = virt_to_page(memmap);

                nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                        >> PAGE_SHIFT;

                free_map_bootmem(memmap_page, nr_pages);
        }
}

/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, the newly allocated memmap and usemap were not
 * consumed and are freed again before returning.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                           int nr_pages)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * No locking here: sparse_index_init() takes its own lock,
         * and it may sleep in kmalloc.
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap, nr_pages);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap, nr_pages);
        }
        return ret;
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL;

        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                                __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }

        free_section_usemap(memmap, usemap);
}
#endif