[PATCH] powerpc: Create a trampoline for the fwnmi vectors
arch/powerpc/platforms/pseries/setup.c
/*
 *  64-bit pSeries and RS/6000 setup code.
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Adapted from 'alpha' version by Gary Thomas
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  Modified by PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * bootup setup stuff..
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/adb.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/kexec.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include "xics.h"
#include <asm/firmware.h>
#include <asm/pmc.h>
#include <asm/mpic.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
#include <asm/smp.h>

#include "plpar_wrappers.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void find_udbg_vterm(void);

int fwnmi_active;  /* TRUE if an FWNMI handler is present */

extern void pSeries_system_reset_exception(struct pt_regs *regs);
extern int pSeries_machine_check_exception(struct pt_regs *regs);

static void pseries_shared_idle(void);
static void pseries_dedicated_idle(void);

struct mpic *pSeries_mpic;

void pSeries_show_cpuinfo(struct seq_file *m)
{
        struct device_node *root;
        const char *model = "";

        root = of_find_node_by_path("/");
        if (root)
                model = get_property(root, "model", NULL);
        seq_printf(m, "machine\t\t: CHRP %s\n", model);
        of_node_put(root);
}

/* Initialize firmware assisted non-maskable interrupts if
 * the firmware supports this feature.
 */
static void __init fwnmi_init(void)
{
        unsigned long system_reset_addr, machine_check_addr;

        int ibm_nmi_register = rtas_token("ibm,nmi-register");
        if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
                return;

        /* If the kernel's not linked at zero we point the firmware at low
         * addresses anyway, and use a trampoline to get to the real code. */
        system_reset_addr  = __pa(system_reset_fwnmi) - PHYSICAL_START;
        machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;

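        /* Hand the low-memory entry points to firmware; a zero return from
         * the RTAS call means the vectors were registered successfully. */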
        if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
                                machine_check_addr))
                fwnmi_active = 1;
}

static void __init pSeries_init_mpic(void)
{
        unsigned int *addrp;
        struct device_node *np;
        unsigned long intack = 0;

        /* All ISUs are setup, complete initialization */
        mpic_init(pSeries_mpic);

        /* Check what kind of cascade ACK we have */
        if (!(np = of_find_node_by_name(NULL, "pci"))
            || !(addrp = (unsigned int *)
                 get_property(np, "8259-interrupt-acknowledge", NULL)))
                printk(KERN_ERR "Cannot find pci to get ack address\n");
        else
                intack = addrp[prom_n_addr_cells(np)-1];
        of_node_put(np);

        /* Setup the legacy interrupts & controller */
        i8259_init(intack, 0);

        /* Hook cascade to mpic */
        mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
}

static void __init pSeries_setup_mpic(void)
{
        unsigned int *opprop;
        unsigned long openpic_addr = 0;
        unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS];
        struct device_node *root;
        int irq_count;

        /* Find the Open PIC if present */
        root = of_find_node_by_path("/");
        opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL);
        if (opprop != 0) {
                int n = prom_n_addr_cells(root);

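                /* The property holds the MPIC base as #address-cells 32-bit
                 * cells; fold them into a single 64-bit physical address. */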
                for (openpic_addr = 0; n > 0; --n)
                        openpic_addr = (openpic_addr << 32) + *opprop++;
                printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
        }
        of_node_put(root);

        BUG_ON(openpic_addr == 0);

        /* Get the sense values from OF */
        prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS);

        /* Setup the openpic driver */
        irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
        pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY,
                                  16, 16, irq_count, /* isu size, irq offset, irq count */
                                  NR_IRQS - 4, /* ipi offset */
                                  senses, irq_count, /* sense & sense size */
                                  " MPIC     ");
}

static void pseries_lpar_enable_pmcs(void)
{
        unsigned long set, reset;

        power4_enable_pmcs();

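        /* H_PERFMON takes set/reset masks: bits in 'set' are enabled and bits
         * in 'reset' are cleared.  Here the MSB asks the hypervisor to turn on
         * its performance monitoring support for this partition. */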
        set = 1UL << 63;
        reset = 0;
        plpar_hcall_norets(H_PERFMON, set, reset);

        /* instruct hypervisor to maintain PMCs */
        if (firmware_has_feature(FW_FEATURE_SPLPAR))
                get_paca()->lppaca.pmcregs_in_use = 1;
}

static void __init pSeries_setup_arch(void)
{
        /* Fixup ppc_md depending on the type of interrupt controller */
        if (ppc64_interrupt_controller == IC_OPEN_PIC) {
                ppc_md.init_IRQ       = pSeries_init_mpic;
                ppc_md.get_irq        = mpic_get_irq;
                /* Allocate the mpic now, so that find_and_init_phbs() can
                 * fill the ISUs */
                pSeries_setup_mpic();
        } else {
                ppc_md.init_IRQ       = xics_init_IRQ;
                ppc_md.get_irq        = xics_get_irq;
        }

#ifdef CONFIG_SMP
        smp_init_pSeries();
#endif
        /* openpic global configuration register (64-bit format). */
        /* openpic Interrupt Source Unit pointer (64-bit format). */
        /* python0 facility area (mmio) (64-bit format) REAL address. */

        /* init to some ~sane value until calibrate_delay() runs */
        loops_per_jiffy = 50000000;

        if (ROOT_DEV == 0) {
                printk("No ramdisk, default root is /dev/sda2\n");
                ROOT_DEV = Root_SDA2;
        }

        fwnmi_init();

        /* Find and initialize PCI host bridges */
        init_pci_config_tokens();
        find_and_init_phbs();
        eeh_init();

        pSeries_nvram_init();

        /* Choose an idle loop */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                vpa_init(boot_cpuid);
                if (get_paca()->lppaca.shared_proc) {
                        printk(KERN_INFO "Using shared processor idle loop\n");
                        ppc_md.idle_loop = pseries_shared_idle;
                } else {
                        printk(KERN_INFO "Using dedicated idle loop\n");
                        ppc_md.idle_loop = pseries_dedicated_idle;
                }
        } else {
                printk(KERN_INFO "Using default idle loop\n");
                ppc_md.idle_loop = default_idle;
        }

        if (platform_is_lpar())
                ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
        else
                ppc_md.enable_pmcs = power4_enable_pmcs;
}

static int __init pSeries_init_panel(void)
{
        /* Manually leave the kernel version on the panel. */
        ppc_md.progress("Linux ppc64\n", 0);
        ppc_md.progress(system_utsname.version, 0);

        return 0;
}
arch_initcall(pSeries_init_panel);


/* Build up the ppc64_firmware_features bitmask field
 * using contents of device-tree/ibm,hypertas-functions.
 * Ultimately this functionality may be moved into prom.c prom_init().
 */
static void __init fw_feature_init(void)
{
        struct device_node *dn;
        char *hypertas;
        unsigned int len;

        DBG(" -> fw_feature_init()\n");

        ppc64_firmware_features = 0;
        dn = of_find_node_by_path("/rtas");
        if (dn == NULL) {
                printk(KERN_ERR "WARNING ! Cannot find RTAS in device-tree !\n");
                goto no_rtas;
        }

        hypertas = get_property(dn, "ibm,hypertas-functions", &len);
        if (hypertas) {
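                /* The property value is a sequence of NUL-terminated strings;
                 * compare each one against the feature table, then skip past it. */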
                while (len > 0) {
                        int i, hypertas_len;
                        /* check value against table of strings */
                        for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) {
                                if ((firmware_features_table[i].name) &&
                                    (strcmp(firmware_features_table[i].name, hypertas)) == 0) {
                                        /* we have a match */
                                        ppc64_firmware_features |=
                                                (firmware_features_table[i].val);
                                        break;
                                }
                        }
                        hypertas_len = strlen(hypertas);
                        len -= hypertas_len + 1;
                        hypertas += hypertas_len + 1;
                }
        }

        of_node_put(dn);
no_rtas:

        DBG(" <- fw_feature_init()\n");
}


static void __init pSeries_discover_pic(void)
{
        struct device_node *np;
        char *typep;

        /*
         * Setup interrupt mapping options that are needed for finish_device_tree
         * to properly parse the OF interrupt tree & do the virtual irq mapping
         */
        __irq_offset_value = NUM_ISA_INTERRUPTS;
        ppc64_interrupt_controller = IC_INVALID;
        for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) {
                typep = (char *)get_property(np, "compatible", NULL);
                if (strstr(typep, "open-pic"))
                        ppc64_interrupt_controller = IC_OPEN_PIC;
                else if (strstr(typep, "ppc-xicp"))
                        ppc64_interrupt_controller = IC_PPC_XIC;
                else
                        printk("pSeries_discover_pic: failed to recognize"
                               " interrupt-controller\n");
                break;
        }
}

static void pSeries_mach_cpu_die(void)
{
        local_irq_disable();
        idle_task_exit();
        /* Some hardware requires clearing the CPPR, while other hardware does not;
         * it is safe either way.
         */
        pSeriesLP_cppr_info(0, 0);
        rtas_stop_self();
        /* Should never get here... */
        BUG();
        for (;;);
}

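/* On LPAR the real DABR is owned by the hypervisor, so debug breakpoints
 * are installed through an hcall rather than by writing the SPR directly. */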
static int pseries_set_dabr(unsigned long dabr)
{
        return plpar_hcall_norets(H_SET_DABR, dabr);
}

static int pseries_set_xdabr(unsigned long dabr)
{
        /* We want to catch accesses from kernel and userspace */
        return plpar_hcall_norets(H_SET_XDABR, dabr,
                        H_DABRX_KERNEL | H_DABRX_USER);
}

/*
 * Early initialization.  Relocation is on but do not reference unbolted pages
 */
static void __init pSeries_init_early(void)
{
        int iommu_off = 0;

        DBG(" -> pSeries_init_early()\n");

        fw_feature_init();

        if (platform_is_lpar())
                hpte_init_lpar();
        else {
                hpte_init_native();
                iommu_off = (of_chosen &&
                             get_property(of_chosen, "linux,iommu-off", NULL));
        }

        if (platform_is_lpar())
                find_udbg_vterm();

        if (firmware_has_feature(FW_FEATURE_DABR))
                ppc_md.set_dabr = pseries_set_dabr;
        else if (firmware_has_feature(FW_FEATURE_XDABR))
                ppc_md.set_dabr = pseries_set_xdabr;

        iommu_init_early_pSeries();

        pSeries_discover_pic();

        DBG(" <- pSeries_init_early()\n");
}


static int pSeries_check_legacy_ioport(unsigned int baseport)
{
        struct device_node *np;

#define I8042_DATA_REG  0x60
#define FDC_BASE        0x3f0

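        /* Only report a legacy port as usable if the device tree actually
         * describes a device behind it. */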
        switch (baseport) {
        case I8042_DATA_REG:
                np = of_find_node_by_type(NULL, "8042");
                if (np == NULL)
                        return -ENODEV;
                of_node_put(np);
                break;
        case FDC_BASE:
                np = of_find_node_by_type(NULL, "fdc");
                if (np == NULL)
                        return -ENODEV;
                of_node_put(np);
                break;
        }
        return 0;
}

/*
 * Called very early, MMU is off, device-tree isn't unflattened
 */
extern struct machdep_calls pSeries_md;

static int __init pSeries_probe(int platform)
{
        if (platform != PLATFORM_PSERIES &&
            platform != PLATFORM_PSERIES_LPAR)
                return 0;

        /* if we have some ppc_md fixups for LPAR to do, do
         * it here ...
         */

        return 1;
}

DECLARE_PER_CPU(unsigned long, smt_snooze_delay);

static inline void dedicated_idle_sleep(unsigned int cpu)
{
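        /* cpu ^ 1 is this cpu's SMT sibling; threads of a core are numbered
         * as adjacent even/odd pairs. */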
        struct paca_struct *ppaca = &paca[cpu ^ 1];

        /* Only sleep if the other thread is not idle */
        if (!(ppaca->lppaca.idle)) {
                local_irq_disable();

                /*
                 * We are about to sleep the thread and so won't be polling any
                 * more.
                 */
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();

                /*
                 * SMT dynamic mode. Cede will result in this thread going
                 * dormant, if the partner thread is still doing work.  Thread
                 * wakes up if partner goes idle, an interrupt is presented, or
                 * a prod occurs.  Returning from the cede enables external
                 * interrupts.
                 */
                if (!need_resched())
                        cede_processor();
                else
                        local_irq_enable();
                set_thread_flag(TIF_POLLING_NRFLAG);
        } else {
                /*
                 * Give the HV an opportunity at the processor, since we are
                 * not doing any work.
                 */
                poll_pending();
        }
}

static void pseries_dedicated_idle(void)
{
        struct paca_struct *lpaca = get_paca();
        unsigned int cpu = smp_processor_id();
        unsigned long start_snooze;
        unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

        set_thread_flag(TIF_POLLING_NRFLAG);

        while (1) {
                /*
                 * Indicate to the HV that we are idle. Now would be
                 * a good time to find other work to dispatch.
                 */
                lpaca->lppaca.idle = 1;

                if (!need_resched()) {
                        start_snooze = get_tb() +
                                *smt_snooze_delay * tb_ticks_per_usec;

                        while (!need_resched() && !cpu_is_offline(cpu)) {
                                ppc64_runlatch_off();

                                /*
                                 * Go into low thread priority and possibly
                                 * low power mode.
                                 */
                                HMT_low();
                                HMT_very_low();

                                if (*smt_snooze_delay != 0 &&
                                    get_tb() > start_snooze) {
                                        HMT_medium();
                                        dedicated_idle_sleep(cpu);
                                }

                        }

                        HMT_medium();
                }

                lpaca->lppaca.idle = 0;
                ppc64_runlatch_on();

                preempt_enable_no_resched();
                schedule();
                preempt_disable();

                if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
                        cpu_die();
        }
}

static void pseries_shared_idle(void)
{
        struct paca_struct *lpaca = get_paca();
        unsigned int cpu = smp_processor_id();

        while (1) {
                /*
                 * Indicate to the HV that we are idle. Now would be
                 * a good time to find other work to dispatch.
                 */
                lpaca->lppaca.idle = 1;

                while (!need_resched() && !cpu_is_offline(cpu)) {
                        local_irq_disable();
                        ppc64_runlatch_off();

                        /*
                         * Yield the processor to the hypervisor.  We return if
                         * an external interrupt occurs (which are driven prior
                         * to returning here) or if a prod occurs from another
                         * processor. When returning here, external interrupts
                         * are enabled.
                         *
                         * Check need_resched() again with interrupts disabled
                         * to avoid a race.
                         */
                        if (!need_resched())
                                cede_processor();
                        else
                                local_irq_enable();

                        HMT_medium();
                }

                lpaca->lppaca.idle = 0;
                ppc64_runlatch_on();

                preempt_enable_no_resched();
                schedule();
                preempt_disable();

                if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
                        cpu_die();
        }
}

static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
        if (platform_is_lpar())
                return PCI_PROBE_DEVTREE;
        return PCI_PROBE_NORMAL;
}

#ifdef CONFIG_KEXEC
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
        /* Don't risk a hypervisor call if we're crashing */
        if (!crash_shutdown) {
                unsigned long vpa = __pa(&get_paca()->lppaca);

                if (unregister_vpa(hard_smp_processor_id(), vpa)) {
                        printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
                                        "failed\n", smp_processor_id(),
                                        hard_smp_processor_id());
                }
        }

        if (ppc64_interrupt_controller == IC_OPEN_PIC)
                mpic_teardown_this_cpu(secondary);
        else
                xics_teardown_cpu(secondary);
}
#endif

struct machdep_calls __initdata pSeries_md = {
        .probe                  = pSeries_probe,
        .setup_arch             = pSeries_setup_arch,
        .init_early             = pSeries_init_early,
        .show_cpuinfo           = pSeries_show_cpuinfo,
        .log_error              = pSeries_log_error,
        .pcibios_fixup          = pSeries_final_fixup,
        .pci_probe_mode         = pSeries_pci_probe_mode,
        .irq_bus_setup          = pSeries_irq_bus_setup,
        .restart                = rtas_restart,
        .power_off              = rtas_power_off,
        .halt                   = rtas_halt,
        .panic                  = rtas_os_term,
        .cpu_die                = pSeries_mach_cpu_die,
        .get_boot_time          = rtas_get_boot_time,
        .get_rtc_time           = rtas_get_rtc_time,
        .set_rtc_time           = rtas_set_rtc_time,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = rtas_progress,
        .check_legacy_ioport    = pSeries_check_legacy_ioport,
        .system_reset_exception = pSeries_system_reset_exception,
        .machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
        .kexec_cpu_down         = pseries_kexec_cpu_down,
        .machine_kexec          = default_machine_kexec,
        .machine_kexec_prepare  = default_machine_kexec_prepare,
#endif
};