[PATCH] sparsemem memory model
mm/sparse.c
/*
 * sparse memory mappings.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
struct mem_section mem_section[NR_MEM_SECTIONS];
EXPORT_SYMBOL(mem_section);
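/*
 * Illustrative note (not part of the original file): a pfn is mapped to its
 * mem_section[] slot by shifting away the per-section page bits, roughly:
 *
 *	pfn_to_section_nr(pfn)	-> pfn >> PFN_SECTION_SHIFT
 *	section_nr_to_pfn(sec)	-> sec << PFN_SECTION_SHIFT
 *
 * with PAGES_PER_SECTION == 1UL << PFN_SECTION_SHIFT.  The actual macro
 * definitions live in include/linux/mmzone.h.
 */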

/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		if (!mem_section[section].section_mem_map)
			mem_section[section].section_mem_map = (void *) -1;
	}
}
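
/*
 * Illustrative usage sketch (an assumption, not part of this patch): an
 * architecture's NUMA setup is expected to register each node's physical
 * range before sparse_init() runs, along the lines of:
 *
 *	for_each_online_node(nid)
 *		memory_present(nid, node_start_pfn(nid), node_end_pfn(nid));
 *
 * where node_start_pfn()/node_end_pfn() stand in for whatever per-node pfn
 * bookkeeping the architecture keeps.  The (void *)-1 value stored above is
 * only a "section present" marker that sparse_init() later replaces with a
 * real mem_map pointer.
 */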

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_valid(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
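
/*
 * Illustrative note (an assumption about the caller, not in this file): the
 * i386 NUMA boot code can use the byte count returned above to size a
 * per-node remap area so the node's mem_map is later satisfied from
 * alloc_remap() instead of lowmem bootmem, e.g.:
 *
 *	size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
 *	pages = PAGE_ALIGN(size) >> PAGE_SHIFT;	/* pages to reserve */
 */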

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	int nid;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!mem_section[pnum].section_mem_map)
			continue;

		nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
		map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
		if (!map)
			map = alloc_bootmem_node(NODE_DATA(nid),
				sizeof(struct page) * PAGES_PER_SECTION);
		if (!map) {
			mem_section[pnum].section_mem_map = 0;
			continue;
		}

		/*
		 * Subtle: bias the mem_map pointer by the section's first
		 * pfn, so that section_mem_map + pfn yields the struct page
		 * for pfn and page - section_mem_map gives back the actual
		 * physical page frame number (see the sketch after this
		 * function).
		 */
		mem_section[pnum].section_mem_map = map -
						section_nr_to_pfn(pnum);
	}
}
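
/*
 * Illustrative sketch (an assumption, not part of this file): with the
 * encoding above, the SPARSEMEM pfn_to_page()/page_to_pfn() pair reduces to
 * pointer arithmetic against the owning section's section_mem_map:
 *
 *	struct page *example_pfn_to_page(unsigned long pfn)
 *	{
 *		return mem_section[pfn_to_section_nr(pfn)].section_mem_map + pfn;
 *	}
 *
 *	unsigned long example_page_to_pfn(unsigned long section_nr,
 *					  struct page *page)
 *	{
 *		return page - mem_section[section_nr].section_mem_map;
 *	}
 *
 * The real macros live in the memory-model headers and recover the section
 * number from the page itself; the functions above only show why
 * sparse_init() stores "map - section_nr_to_pfn(pnum)" rather than the raw
 * map pointer.
 */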