/*
 * arch/sh/kernel/setup.c
 *
 * This file handles the architecture-dependent parts of initialization
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007 Paul Mundt
 */
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/utsname.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/pfn.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/clock.h>
#include <asm/mmu_context.h>

extern void * __rd_start, * __rd_end;

/*
 * Machine setup.
 */

/*
 * Initialize loops_per_jiffy to 10000000 (1000 MIPS).
 * This value is used at the very early stages of serial setup;
 * erring on the large side here is harmless.
 */
struct sh_cpuinfo boot_cpu_data = { CPU_SH_NONE, 10000000, };
#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

#if defined(CONFIG_SH_UNKNOWN)
struct sh_machine_vector sh_mv;
#endif

extern int root_mountflags;

#define MV_NAME_SIZE 32

static struct sh_machine_vector* __init get_mv_byname(const char* name);

/*
 * This is set up by the setup-routine at boot-time
 */
#define PARAM   ((unsigned char *)empty_zero_page)

#define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
#define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
#define ORIG_ROOT_DEV (*(unsigned long *) (PARAM+0x008))
#define LOADER_TYPE (*(unsigned long *) (PARAM+0x00c))
#define INITRD_START (*(unsigned long *) (PARAM+0x010))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x014))
/* ... */
#define COMMAND_LINE ((char *) (PARAM+0x100))
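/*
 * Note: these offsets describe the boot parameter block at the start of
 * empty_zero_page, filled in by the boot loader (or by setup_arch() when
 * a built-in command line is configured); parse_cmdline() below scans
 * the raw COMMAND_LINE string.
 */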

#define RAMDISK_IMAGE_START_MASK        0x07FF
#define RAMDISK_PROMPT_FLAG             0x8000
#define RAMDISK_LOAD_FLAG               0x4000

static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

unsigned long memory_start, memory_end;

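/*
 * Parse the raw boot command line: "mem=" resizes the usable memory
 * range and "sh_mv=" selects a machine vector, optionally with an I/O
 * base, e.g. "sh_mv=BoardName,0xa8000000" (illustrative values only).
 * Both options are stripped from the line; the remainder is copied to
 * command_line for the normal option parsing later on.
 */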
static inline void parse_cmdline (char ** cmdline_p, char mv_name[MV_NAME_SIZE],
                                  struct sh_machine_vector** mvp,
                                  unsigned long *mv_io_base)
{
        char c = ' ', *to = command_line, *from = COMMAND_LINE;
        int len = 0;

        /* Save unparsed command line copy for /proc/cmdline */
        memcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
        boot_command_line[COMMAND_LINE_SIZE-1] = '\0';

        memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
        memory_end = memory_start + __MEMORY_SIZE;

        for (;;) {
                /*
                 * "mem=XXX[kKmM]" defines a size of memory.
                 */
                if (c == ' ' && !memcmp(from, "mem=", 4)) {
                        if (to != command_line)
                                to--;
                        {
                                unsigned long mem_size;

                                mem_size = memparse(from+4, &from);
                                memory_end = memory_start + mem_size;
                        }
                }

                if (c == ' ' && !memcmp(from, "sh_mv=", 6)) {
                        char* mv_end;
                        char* mv_comma;
                        int mv_len;
                        if (to != command_line)
                                to--;
                        from += 6;
                        mv_end = strchr(from, ' ');
                        if (mv_end == NULL)
                                mv_end = from + strlen(from);

                        mv_comma = strchr(from, ',');
                        if ((mv_comma != NULL) && (mv_comma < mv_end)) {
                                int ints[3];
                                get_options(mv_comma+1, ARRAY_SIZE(ints), ints);
                                *mv_io_base = ints[1];
                                mv_len = mv_comma - from;
                        } else {
                                mv_len = mv_end - from;
                        }
                        if (mv_len > (MV_NAME_SIZE-1))
                                mv_len = MV_NAME_SIZE-1;
                        memcpy(mv_name, from, mv_len);
                        mv_name[mv_len] = '\0';
                        from = mv_end;

                        *mvp = get_mv_byname(mv_name);
                }

                c = *(from++);
                if (!c)
                        break;
                if (COMMAND_LINE_SIZE <= ++len)
                        break;
                *(to++) = c;
        }
        *to = '\0';
        *cmdline_p = command_line;
}

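/*
 * Select the machine vector for this boot: parse the command line, fall
 * back to the "unknown" vector on CONFIG_SH_UNKNOWN, and point any I/O
 * operations the board left unset at the generic implementations.
 */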
static int __init sh_mv_setup(char **cmdline_p)
{
#ifdef CONFIG_SH_UNKNOWN
        extern struct sh_machine_vector mv_unknown;
#endif
        struct sh_machine_vector *mv = NULL;
        char mv_name[MV_NAME_SIZE] = "";
        unsigned long mv_io_base = 0;

        parse_cmdline(cmdline_p, mv_name, &mv, &mv_io_base);

#ifdef CONFIG_SH_UNKNOWN
        if (mv == NULL) {
                mv = &mv_unknown;
                if (*mv_name != '\0') {
                        printk("Warning: Unsupported machine %s, using unknown\n",
                               mv_name);
                }
        }
        sh_mv = *mv;
#endif

        /*
         * Manually walk the machine vector, filling in anything the board
         * has not set by hand and falling back to the generic implementation.
         */
#define mv_set(elem) do { \
        if (!sh_mv.mv_##elem) \
                sh_mv.mv_##elem = generic_##elem; \
} while (0)

        mv_set(inb);    mv_set(inw);    mv_set(inl);
        mv_set(outb);   mv_set(outw);   mv_set(outl);

        mv_set(inb_p);  mv_set(inw_p);  mv_set(inl_p);
        mv_set(outb_p); mv_set(outw_p); mv_set(outl_p);

        mv_set(insb);   mv_set(insw);   mv_set(insl);
        mv_set(outsb);  mv_set(outsw);  mv_set(outsl);

        mv_set(readb);  mv_set(readw);  mv_set(readl);
        mv_set(writeb); mv_set(writew); mv_set(writel);

        mv_set(ioport_map);
        mv_set(ioport_unmap);
        mv_set(irq_demux);

#ifdef CONFIG_SH_UNKNOWN
        __set_io_port_base(mv_io_base);
#endif

        if (!sh_mv.mv_nr_irqs)
                sh_mv.mv_nr_irqs = NR_IRQS;

        return 0;
}

/*
 * Register fully available low RAM pages with the bootmem allocator.
 */
static void __init register_bootmem_low_pages(void)
{
        unsigned long curr_pfn, last_pfn, pages;

        /*
         * We are rounding up the start address of usable memory:
         */
        curr_pfn = PFN_UP(__MEMORY_START);

        /*
         * ... and at the end of the usable range downwards:
         */
        last_pfn = PFN_DOWN(__pa(memory_end));

        if (last_pfn > max_low_pfn)
                last_pfn = max_low_pfn;

        pages = last_pfn - curr_pfn;
        free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
}

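/*
 * Bring up the node 0 bootmem allocator: hand it all usable low memory,
 * then reserve the kernel image plus bootmem bitmap, physical page 0,
 * and (when present) the initrd so they cannot be handed out again.
 */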
void __init setup_bootmem_allocator(unsigned long start_pfn)
{
        unsigned long bootmap_size;

        /*
         * Find a proper area for the bootmem bitmap. After this
         * bootstrap step all allocations (until the page allocator
         * is intact) must be done via bootmem_alloc().
         */
        bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
                                         min_low_pfn, max_low_pfn);

        register_bootmem_low_pages();

        node_set_online(0);

        /*
         * Reserve the kernel text and the bootmem bitmap. We do this in two
         * steps (the first step was init_bootmem()) because it catches the
         * (definitely buggy) case of us accidentally initializing the
         * bootmem allocator with an invalid RAM area.
         */
        reserve_bootmem(__MEMORY_START+PAGE_SIZE,
                (PFN_PHYS(start_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem(__MEMORY_START, PAGE_SIZE);

#ifdef CONFIG_BLK_DEV_INITRD
        ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
        if (&__rd_start != &__rd_end) {
                LOADER_TYPE = 1;
                INITRD_START = PHYSADDR((unsigned long)&__rd_start) -
                                        __MEMORY_START;
                INITRD_SIZE = (unsigned long)&__rd_end -
                              (unsigned long)&__rd_start;
        }

        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
                        reserve_bootmem(INITRD_START + __MEMORY_START,
                                        INITRD_SIZE);
                        initrd_start = INITRD_START + PAGE_OFFSET +
                                        __MEMORY_START;
                        initrd_end = initrd_start + INITRD_SIZE;
                } else {
                        printk("initrd extends beyond end of memory "
                            "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                                    INITRD_START + INITRD_SIZE,
                                    max_low_pfn << PAGE_SHIFT);
                        initrd_start = 0;
                }
        }
#endif
}

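/*
 * In the flat-memory case the bootmem arena simply starts at the first
 * page frame above the kernel image; configurations with multiple
 * memory nodes supply their own setup_memory() elsewhere.
 */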
#ifndef CONFIG_NEED_MULTIPLE_NODES
static void __init setup_memory(void)
{
        unsigned long start_pfn;

        /*
         * Partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = PFN_UP(__pa(_end));
        setup_bootmem_allocator(start_pfn);
}
#else
extern void __init setup_memory(void);
#endif

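/*
 * setup_arch() is the architecture entry point called from start_kernel():
 * it decodes the boot parameter block, sizes memory, sets up bootmem and
 * paging, and finally hands control to the board's mv_setup() hook.
 */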
void __init setup_arch(char **cmdline_p)
{
        enable_mmu();

#ifdef CONFIG_CMDLINE_BOOL
        strcpy(COMMAND_LINE, CONFIG_CMDLINE);
#endif

        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

        if (!MOUNT_ROOT_RDONLY)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code = (unsigned long) _etext;
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = (unsigned long) _end;

        code_resource.start = virt_to_phys(_text);
        code_resource.end = virt_to_phys(_etext)-1;
        data_resource.start = virt_to_phys(_etext);
        data_resource.end = virt_to_phys(_edata)-1;

        parse_early_param();

        sh_mv_setup(cmdline_p);

        /*
         * Find the highest page frame number we have available
         */
        max_pfn = PFN_DOWN(__pa(memory_end));

        /*
         * Determine low and high memory ranges:
         */
        max_low_pfn = max_pfn;
        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

        nodes_clear(node_online_map);
        setup_memory();
        paging_init();
        sparse_init();

#ifdef CONFIG_DUMMY_CONSOLE
        conswitchp = &dummy_con;
#endif

        /* Perform the machine specific initialisation */
        if (likely(sh_mv.mv_setup))
                sh_mv.mv_setup(cmdline_p);
}

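/*
 * Look up a machine vector by name among the vectors placed in the
 * linker-provided __machvec section (__machvec_start..__machvec_end).
 */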
struct sh_machine_vector* __init get_mv_byname(const char* name)
{
        extern long __machvec_start, __machvec_end;
        struct sh_machine_vector *all_vecs =
                (struct sh_machine_vector *)&__machvec_start;

        int i, n = ((unsigned long)&__machvec_end
                    - (unsigned long)&__machvec_start)/
                sizeof(struct sh_machine_vector);

        for (i = 0; i < n; ++i) {
                struct sh_machine_vector *mv = &all_vecs[i];
                if (mv == NULL)
                        continue;
                /* Match each candidate vector by its own name. */
                if (strcasecmp(name, mv->mv_name) == 0) {
                        return mv;
                }
        }
        return NULL;
}

static struct cpu cpu[NR_CPUS];

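/* Register one cpu device per possible CPU with the sysfs topology code. */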
static int __init topology_init(void)
{
        int cpu_id;

        for_each_possible_cpu(cpu_id)
                register_cpu(&cpu[cpu_id], cpu_id);

        return 0;
}

subsys_initcall(topology_init);

static const char *cpu_name[] = {
        [CPU_SH7206]    = "SH7206",     [CPU_SH7619]    = "SH7619",
        [CPU_SH7604]    = "SH7604",     [CPU_SH7300]    = "SH7300",
        [CPU_SH7705]    = "SH7705",     [CPU_SH7706]    = "SH7706",
        [CPU_SH7707]    = "SH7707",     [CPU_SH7708]    = "SH7708",
        [CPU_SH7709]    = "SH7709",     [CPU_SH7710]    = "SH7710",
        [CPU_SH7712]    = "SH7712",
        [CPU_SH7729]    = "SH7729",     [CPU_SH7750]    = "SH7750",
        [CPU_SH7750S]   = "SH7750S",    [CPU_SH7750R]   = "SH7750R",
        [CPU_SH7751]    = "SH7751",     [CPU_SH7751R]   = "SH7751R",
        [CPU_SH7760]    = "SH7760",     [CPU_SH73180]   = "SH73180",
        [CPU_ST40RA]    = "ST40RA",     [CPU_ST40GX1]   = "ST40GX1",
        [CPU_SH4_202]   = "SH4-202",    [CPU_SH4_501]   = "SH4-501",
        [CPU_SH7770]    = "SH7770",     [CPU_SH7780]    = "SH7780",
        [CPU_SH7781]    = "SH7781",     [CPU_SH7343]    = "SH7343",
        [CPU_SH7785]    = "SH7785",     [CPU_SH7722]    = "SH7722",
        [CPU_SH_NONE]   = "Unknown"
};

const char *get_cpu_subtype(struct sh_cpuinfo *c)
{
        return cpu_name[c->type];
}

#ifdef CONFIG_PROC_FS
/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
static const char *cpu_flags[] = {
        "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
        "ptea", "llsc", "l2", NULL
};

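/*
 * Print the feature flags: bit n of c->flags selects cpu_flags[n + 1],
 * with cpu_flags[0] ("none") used when no flags are set at all.
 */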
static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
{
        unsigned long i;

        seq_printf(m, "cpu flags\t:");

        if (!c->flags) {
                seq_printf(m, " %s\n", cpu_flags[0]);
                return;
        }

        for (i = 0; cpu_flags[i]; i++)
                if ((c->flags & (1 << i)))
                        seq_printf(m, " %s", cpu_flags[i+1]);

        seq_printf(m, "\n");
}

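/* Report one cache: total size is ways * sets * line size, shown in KiB. */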
static void show_cacheinfo(struct seq_file *m, const char *type,
                           struct cache_info info)
{
        unsigned int cache_size;

        cache_size = info.ways * info.sets * info.linesz;

        seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
                   type, cache_size >> 10, info.ways);
}

/*
 *      Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct sh_cpuinfo *c = v;
        unsigned int cpu = c - cpu_data;

        if (!cpu_online(cpu))
                return 0;

        if (cpu == 0)
                seq_printf(m, "machine\t\t: %s\n", get_system_type());

        seq_printf(m, "processor\t: %d\n", cpu);
        seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
        seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));

        show_cpuflags(m, c);

        seq_printf(m, "cache type\t: ");

        /*
         * Check what type of cache we have: both the unified cache of the
         * SH-2 and SH-3 and the Harvard-style split cache of the SH-4 are
         * supported.
         */
        if (c->icache.flags & SH_CACHE_COMBINED) {
                seq_printf(m, "unified\n");
                show_cacheinfo(m, "cache", c->icache);
        } else {
                seq_printf(m, "split (harvard)\n");
                show_cacheinfo(m, "icache", c->icache);
                show_cacheinfo(m, "dcache", c->dcache);
        }

        /* Optional secondary cache */
        if (c->flags & CPU_HAS_L2_CACHE)
                show_cacheinfo(m, "scache", c->scache);

        seq_printf(m, "bogomips\t: %lu.%02lu\n",
                     c->loops_per_jiffy/(500000/HZ),
                     (c->loops_per_jiffy/(5000/HZ)) % 100);

        return show_clocks(m);
}

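/* seq_file iterator over cpu_data[] for /proc/cpuinfo, one slot per CPU. */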
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */