sh: Isolate uncached mapping support.
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>
#include <asm/sizes.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_UNCACHED_MAPPING
/*
 * This is the offset of the uncached section from its cached alias.
 *
 * Legacy platforms handle trivial transitions between cached and
 * uncached segments by making use of the 1:1 mapping relationship in
 * 512MB lowmem, others via a special uncached mapping.
 *
 * The default value is only valid in 29-bit mode; in 32-bit mode it is
 * updated by the early PMB initialization code.
 */
unsigned long cached_to_uncached = 0x20000000;
unsigned long uncached_size = SZ_512M;
#endif
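
#ifdef CONFIG_UNCACHED_MAPPING
/*
 * Illustrative sketch, not part of the original file: given the fixed
 * offset above, the uncached alias of a cached lowmem address is plain
 * arithmetic.  The helper name here is ours, purely for illustration.
 */
static inline unsigned long example_cached_to_uncached(unsigned long vaddr)
{
	/* Only meaningful for addresses inside the identity-mapped lowmem. */
	return vaddr + cached_to_uncached;
}
#endif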

#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	pte = pte_offset_kernel(pmd, addr);
	return pte;
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}
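
/*
 * Illustrative usage sketch, not from the original file: a fixmap slot
 * can be pointed at a physical page and later torn down again.  Wired
 * entries (prot including _PAGE_WIRED) are additionally pinned in the
 * TLB.  FIX_EXAMPLE is a hypothetical index, not a real sh fixmap slot.
 */
static void __init example_fixmap_usage(unsigned long phys)
{
	__set_fixmap(FIX_EXAMPLE, phys, PAGE_KERNEL);
	/* ... access the page through fix_to_virt(FIX_EXAMPLE) ... */
	__clear_fixmap(FIX_EXAMPLE, PAGE_KERNEL);
}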

void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
#ifdef __PAGETABLE_PMD_FOLDED
			pmd = (pmd_t *)pud;
#else
			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pud_populate(&init_mm, pud, pmd);
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					pmd_populate_kernel(&init_mm, pmd, pte);
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1.  So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}
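
/*
 * Illustrative sketch, not part of the original file: the comment in
 * paging_init() above relies on the kernel living in P1, the fixed
 * cached identity mapping of the first 512MB of physical memory on
 * 29-bit parts.  The conversion is plain arithmetic, mirroring what
 * __va() does here; the helper name is ours.
 */
static inline void *example_phys_to_p1(unsigned long phys)
{
	return (void *)(phys + PAGE_OFFSET);	/* PAGE_OFFSET is the P1 base */
}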

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	/* Initialize the vDSO */
	vsyscall_init();

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		(unsigned long)memory_start + cached_to_uncached,
		(unsigned long)memory_start + cached_to_uncached + uncached_size,
		uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}
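
/*
 * Illustrative sketch, not part of the original file: mem_init_done is
 * consulted elsewhere in the tree to tell whether the page allocator is
 * up yet.  A consumer might switch allocators like this; the helper is
 * ours, purely for illustration.
 */
static unsigned long example_early_get_page(void)
{
	if (mem_init_done)
		return __get_free_page(GFP_KERNEL);
	return (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
}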

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
				start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */