/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
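/*
 * swapper_pg_dir is the kernel's reference page directory; all kernel
 * mappings live here and are inherited by every mm.
 */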
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * This is the offset of the uncached section from its cached alias.
 *
 * Legacy platforms handle trivial transitions between cached and
 * uncached segments by making use of the 1:1 mapping relationship in
 * 512MB lowmem, others via a special uncached mapping.
 *
 * The default value is only valid in 29-bit mode; in 32-bit mode it is
 * updated by the early PMB initialization code.
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
unsigned long uncached_size = 0x20000000;
#endif

#ifdef CONFIG_MMU
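/*
 * Walk the kernel page tables and return the pte that maps @addr,
 * allocating the intermediate pud/pmd levels as needed. Returns NULL
 * if the pgd entry is missing or an intermediate allocation fails.
 */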
static pte_t *__get_pte_phys(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                pgd_ERROR(*pgd);
                return NULL;
        }

        pud = pud_alloc(NULL, pgd, addr);
        if (unlikely(!pud)) {
                /* pud is NULL here, so pud_ERROR(*pud) would oops. */
                return NULL;
        }

        pmd = pmd_alloc(NULL, pud, addr);
        if (unlikely(!pmd)) {
                /* Likewise, pmd_ERROR(*pmd) would dereference NULL. */
                return NULL;
        }

        return pte_offset_kernel(pmd, addr);
}

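/*
 * Install a kernel mapping at @addr pointing at physical address @phys
 * with protection @prot, then flush the local TLB entry. Wired
 * protections are additionally pinned into the TLB.
 */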
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
        pte_t *pte;

        pte = __get_pte_phys(addr);
        if (!pte)
                return;
        if (!pte_none(*pte)) {
                pte_ERROR(*pte);
                return;
        }

        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
        local_flush_tlb_one(get_asid(), addr);

        if (pgprot_val(prot) & _PAGE_WIRED)
                tlb_wire_entry(NULL, addr, *pte);
}

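/*
 * Tear down a mapping previously established by set_pte_phys(),
 * unwiring any wired TLB entry before the pte is cleared.
 */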
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
        pte_t *pte;

        pte = __get_pte_phys(addr);
        if (!pte)
                return;

        if (pgprot_val(prot) & _PAGE_WIRED)
                tlb_unwire_entry();

        set_pte(pte, pfn_pte(0, __pgprot(0)));
        local_flush_tlb_one(get_asid(), addr);
}

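/*
 * Map a fixmap slot to a physical address, after validating the index
 * against __end_of_fixed_addresses.
 */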
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        set_pte_phys(address, phys, prot);
}

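/* Undo a __set_fixmap() mapping for the given fixmap index. */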
void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        clear_pte_phys(address, prot);
}

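/*
 * Preallocate and hook up page tables covering [start, end) so that
 * later fixmap users only need to fill in ptes. pmd pages come from
 * bootmem when the pmd level is not folded into the pud.
 */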
void __init page_table_range_init(unsigned long start, unsigned long end,
                                         pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
#ifdef __PAGETABLE_PMD_FOLDED
                        pmd = (pmd_t *)pud;
#else
                        pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                        pud_populate(&init_mm, pud, pmd);
                        pmd += k;
#endif
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        pmd_populate_kernel(&init_mm, pmd, pte);
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
}
#endif  /* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long vaddr, end;
        int nid;

        /* We don't need to map the kernel through the TLB, as
         * it is permanently mapped using P1. So clear the
         * entire pgd. */
        memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

        /* Set an initial value for the MMU.TTB so we don't have to
         * check for a null value. */
        set_TTB(swapper_pg_dir);

        /*
         * Populate the relevant portions of swapper_pg_dir so that
         * we can use the fixmap entries without calling kmalloc.
         * The ptes will be filled in by __set_fixmap().
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, swapper_pg_dir);

        kmap_coherent_init();

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long low, start_pfn;

                start_pfn = pgdat->bdata->node_min_pfn;
                low = pgdat->bdata->node_low_pfn;

                if (max_zone_pfns[ZONE_NORMAL] < low)
                        max_zone_pfns[ZONE_NORMAL] = low;

                printk(KERN_INFO "Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
                       nid, start_pfn, low);
        }

        free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
        no_iommu_init();
}

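/* Set once mem_init() has completed. */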
unsigned int mem_init_done = 0;

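/*
 * Release bootmem to the page allocator on every online node, set up
 * the zero page and the vDSO, and print the memory and virtual layout
 * summaries.
 */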
void __init mem_init(void)
{
        int codesize, datasize, initsize;
        int nid;

        iommu_init();

        num_physpages = 0;
        high_memory = NULL;

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long node_pages = 0;
                void *node_high_memory;

                num_physpages += pgdat->node_present_pages;

                if (pgdat->node_spanned_pages)
                        node_pages = free_all_bootmem_node(pgdat);

                totalram_pages += node_pages;

                node_high_memory = (void *)__va((pgdat->node_start_pfn +
                                                 pgdat->node_spanned_pages) <<
                                                 PAGE_SHIFT);
                if (node_high_memory > high_memory)
                        high_memory = node_high_memory;
        }

        /* Set this up early, so we can take care of the zero page */
        cpu_cache_init();

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);
        __flush_wback_region(empty_zero_page, PAGE_SIZE);

        /* Initialize the vDSO */
        vsyscall_init();

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
               "%dk data, %dk init)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                datasize >> 10,
                initsize >> 10);

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
                "            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                (unsigned long)VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)memory_start, (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

                (unsigned long)memory_start + cached_to_uncached,
                (unsigned long)memory_start + cached_to_uncached + uncached_size,
                uncached_size >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        mem_init_done = 1;
}

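/*
 * Return the pages backing the .init sections to the page allocator
 * once boot no longer needs them.
 */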
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing unused kernel memory: %ldk freed\n",
               ((unsigned long)&__init_end -
                (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
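/*
 * Hand the pages that held the initrd image back to the page
 * allocator once the image is no longer needed.
 */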
void free_initrd_mem(unsigned long start, unsigned long end)
{
        unsigned long p;
        for (p = start; p < end; p += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(p));
                init_page_count(virt_to_page(p));
                free_page(p);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
               (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
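/*
 * Hot-add @size bytes of memory at physical address @start to node
 * @nid. Everything lands in ZONE_NORMAL, the only zone populated here.
 */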
int arch_add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        pgdat = NODE_DATA(nid);

        /* We only have ZONE_NORMAL, so this is easy.. */
        ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
                                start_pfn, nr_pages);
        if (unlikely(ret))
                printk(KERN_ERR "%s: Failed, __add_pages() == %d\n",
                       __func__, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
        /* Node 0 for now.. */
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */