/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * Handle trivial transitions between cached and uncached
 * segments, making use of the 1:1 mapping relationship in
 * 512MB lowmem.
 *
 * This is the offset of the uncached section from its cached alias.
 * The default value is only valid in 29-bit mode; in 32-bit mode it
 * is overridden in pmb_init().
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
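
/*
 * Illustrative sketch (not part of the original file): with the 1:1
 * aliasing above, moving between the cached (P1) and uncached (P2)
 * view of a lowmem address is plain offset arithmetic. The helper
 * name is hypothetical.
 */
static inline unsigned long sh_uncached_alias(unsigned long cached_addr)
{
        /* e.g. 0x8c000000 (P1) + 0x20000000 = 0xac000000 (P2) in 29-bit mode */
        return cached_addr + cached_to_uncached;
}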
#endif

#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        /* Walk the kernel page tables, allocating mid-level tables as needed. */
        pgd = pgd_offset_k(addr);

        pud = pud_alloc(NULL, pgd, addr);
        if (unlikely(!pud))
                return NULL;

        pmd = pmd_alloc(NULL, pud, addr);
        if (unlikely(!pmd))
                return NULL;

        return pte_offset_kernel(pmd, addr);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
        pte_t *pte;

        pte = __get_pte_phys(addr);
        if (unlikely(!pte))
                return;
        if (!pte_none(*pte)) {
                pte_ERROR(*pte);
                return;
        }

        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
        local_flush_tlb_one(get_asid(), addr);

        /* Wired entries are pinned in the TLB and survive normal flushes. */
        if (pgprot_val(prot) & _PAGE_WIRED)
                tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
        pte_t *pte;

        pte = __get_pte_phys(addr);
        if (unlikely(!pte))
                return;

        /* Drop the wired TLB slot before tearing down the mapping. */
        if (pgprot_val(prot) & _PAGE_WIRED)
                tlb_unwire_entry();

        set_pte(pte, pfn_pte(0, __pgprot(0)));
        local_flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap
 * mapping across a context switch; we don't presently do this, but it
 * could be done in a fashion similar to the wired TLB interface that
 * sh64 uses (by way of the memory-mapped UTLB configuration). This
 * unfortunately forces us to give up a TLB entry for each mapping we
 * want to preserve. While that may be viable for a small number of
 * fixmaps, it's not particularly useful for everything, and needs to
 * be carefully evaluated (i.e., we may want this for the vsyscall
 * page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can
 * pass in at __set_fixmap() time to determine the appropriate behavior
 * to follow.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        clear_pte_phys(address, prot);
}
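
/*
 * Usage sketch (illustrative, not original code): following the
 * _PAGE_WIRED idea in the comment above, a caller that wants a fixmap
 * slot pinned in the TLB would pass the flag in through the pgprot:
 *
 *	__set_fixmap(idx, phys,
 *		     __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_WIRED));
 *
 * set_pte_phys() then calls tlb_wire_entry() for the new mapping, and
 * a later __clear_fixmap() with the same pgprot unwires it again.
 */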

void __init page_table_range_init(unsigned long start, unsigned long end,
                                  pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
#ifdef __PAGETABLE_PMD_FOLDED
                        pmd = (pmd_t *)pud;
#else
                        pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                        pud_populate(&init_mm, pud, pmd);
                        pmd += k;
#endif
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                /* Back each empty pmd slot with a fresh pte page. */
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                                        pmd_populate_kernel(&init_mm, pmd, pte);
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
}
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long vaddr, end;
        int nid;

        /*
         * We don't need to map the kernel through the TLB, as it is
         * permanently mapped using P1, so clear the entire pgd.
         */
        memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

        /*
         * Set an initial value for the MMU.TTB so we don't have to
         * check for a null value.
         */
        set_TTB(swapper_pg_dir);

        /*
         * Populate the relevant portions of swapper_pg_dir so that
         * we can use the fixmap entries without calling kmalloc.
         * pte's will be filled in by __set_fixmap().
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, swapper_pg_dir);
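
        /*
         * Worked example for the rounding above (sketch, made-up numbers):
         * if the fixmap spans 0xff7f6000..0xff800000 and PMD_SIZE is 4MB,
         * vaddr rounds the lowest fixmap address down to 0xff400000 while
         * end rounds FIXADDR_TOP up to the next 4MB boundary, so whole
         * pte pages end up covering the entire fixmap range.
         */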

        kmap_coherent_init();

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long low, start_pfn;

                start_pfn = pgdat->bdata->node_min_pfn;
                low = pgdat->bdata->node_low_pfn;

                if (max_zone_pfns[ZONE_NORMAL] < low)
                        max_zone_pfns[ZONE_NORMAL] = low;

                printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
                       nid, start_pfn, low);
        }

        free_area_init_nodes(max_zone_pfns);

        /* Set up the uncached fixmap */
        set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
        no_iommu_init();
}

void __init mem_init(void)
{
        int codesize, datasize, initsize;
        int nid;

        iommu_init();

        num_physpages = 0;
        high_memory = NULL;

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long node_pages = 0;
                void *node_high_memory;

                num_physpages += pgdat->node_present_pages;

                if (pgdat->node_spanned_pages)
                        node_pages = free_all_bootmem_node(pgdat);

                totalram_pages += node_pages;

                node_high_memory = (void *)__va((pgdat->node_start_pfn +
                                                 pgdat->node_spanned_pages) <<
                                                 PAGE_SHIFT);
                if (node_high_memory > high_memory)
                        high_memory = node_high_memory;
        }

        /* Set this up early, so we can take care of the zero page */
        cpu_cache_init();

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);
        __flush_wback_region(empty_zero_page, PAGE_SIZE);

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
               "%dk data, %dk init)\n",
               nr_free_pages() << (PAGE_SHIFT-10),
               num_physpages << (PAGE_SHIFT-10),
               codesize >> 10,
               datasize >> 10,
               initsize >> 10);

        /* Initialize the vDSO */
        vsyscall_init();
}

void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        printk("Freeing unused kernel memory: %ldk freed\n",
               ((unsigned long)&__init_end -
                (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        unsigned long p;

        for (p = start; p < end; p += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(p));
                init_page_count(virt_to_page(p));
                free_page(p);
                totalram_pages++;
        }
        printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        pgdat = NODE_DATA(nid);

        /* We only have ZONE_NORMAL, so this is easy.. */
        ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
                          start_pfn, nr_pages);
        if (unlikely(ret))
                printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
        /* Node 0 for now.. */
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_PMB
int __in_29bit_mode(void)
{
        return !(ctrl_inl(PMB_PASCR) & PASCR_SE);
}
#endif /* CONFIG_PMB */
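
/*
 * Background sketch (not original code): PASCR_SE is the address space
 * extension (32-bit mode) enable bit, so the test above reads as
 * "29-bit mode means SE is clear". A hypothetical caller:
 *
 *	if (__in_29bit_mode())
 *		pr_info("PMB: legacy 29-bit physical addressing\n");
 */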