/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2010  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/lmb.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>
#include <asm/sizes.h>
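
/*
 * swapper_pg_dir is the kernel's reference page directory; mmu_gathers
 * provides the per-CPU state used by the generic TLB shootdown code.
 */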
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
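
/*
 * Fold the standard __MEMORY_START/__MEMORY_SIZE region into the LMB
 * allocator as the default memory setup.
 */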
void __init generic_mem_init(void)
{
	lmb_add(__MEMORY_START, __MEMORY_SIZE);
}
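
/*
 * Fixmap support: the helpers below walk and populate the kernel page
 * tables for fixed virtual addresses. They are only built when an MMU
 * is present.
 */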
#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud))
		return NULL;

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);
}
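
/*
 * Install a single kernel PTE for a fixmap slot, flush the local TLB
 * entry, and pin it down as a wired entry when the protection bits
 * ask for it.
 */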
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}
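
/*
 * Undo set_pte_phys(): drop any wired TLB entry first, then clear the
 * PTE and flush the stale translation.
 */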
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}
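
/*
 * Public fixmap interface: translate the fixmap index to its virtual
 * address and hand off to the PTE helpers above.
 */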
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}
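
/*
 * Pre-allocate page tables for the [start, end) virtual range so that
 * later fixmap updates never need to allocate memory. The page-table
 * pages themselves are set up here; the individual PTE entries are
 * filled in later by __set_fixmap().
 */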
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
#ifdef __PAGETABLE_PMD_FOLDED
			pmd = (pmd_t *)pud;
#else
			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pud_populate(&init_mm, pud, pmd);
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					pmd_populate_kernel(&init_mm, pmd, pte);
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

unsigned int mem_init_done = 0;
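
/*
 * Final bring-up of the page allocator: release bootmem to the buddy
 * allocator on every online node, establish high_memory, prime the
 * zero page, and report the memory layout.
 */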
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,
		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}
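
/*
 * Give the pages spanning the .init sections back to the page allocator
 * once boot is complete.
 */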
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}
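
/*
 * Likewise, return the pages holding the initrd image once it is no
 * longer needed.
 */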
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
				start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif /* CONFIG_MEMORY_HOTPLUG */