[POWERPC] Use the genirq framework
arch/powerpc/platforms/pseries/setup.c
/*
 *  64-bit pSeries and RS/6000 setup code.
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Adapted from 'alpha' version by Gary Thomas
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  Modified by PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * bootup setup stuff..
 */

#undef DEBUG

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/adb.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/kexec.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include "xics.h"
#include <asm/pmc.h>
#include <asm/mpic.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
#include <asm/smp.h>

#include "plpar_wrappers.h"
#include "ras.h"
#include "firmware.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void find_udbg_vterm(void);

int fwnmi_active;  /* TRUE if an FWNMI handler is present */

static void pseries_shared_idle_sleep(void);
static void pseries_dedicated_idle_sleep(void);

struct mpic *pSeries_mpic;

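/* Print the /proc/cpuinfo machine line using the "model" property of the
 * device tree root. */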
static void pSeries_show_cpuinfo(struct seq_file *m)
{
        struct device_node *root;
        const char *model = "";

        root = of_find_node_by_path("/");
        if (root)
                model = get_property(root, "model", NULL);
        seq_printf(m, "machine\t\t: CHRP %s\n", model);
        of_node_put(root);
}

/* Initialize firmware assisted non-maskable interrupts if
 * the firmware supports this feature.
 */
static void __init fwnmi_init(void)
{
        unsigned long system_reset_addr, machine_check_addr;

        int ibm_nmi_register = rtas_token("ibm,nmi-register");
        if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
                return;

        /* If the kernel's not linked at zero we point the firmware at low
         * addresses anyway, and use a trampoline to get to the real code. */
        system_reset_addr  = __pa(system_reset_fwnmi) - PHYSICAL_START;
        machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;

        if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
                                machine_check_addr))
                fwnmi_active = 1;
}

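/*
 * Chained handler for the legacy 8259 cascade.  Ack the 8259 to find the
 * cascaded source, EOI the cascade interrupt on the MPIC once the first
 * ack has been issued, and pass any valid source on to the generic IRQ
 * layer.  The loop is bounded so a wedged 8259 cannot hang us here.
 */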
void pSeries_8259_cascade(unsigned int irq, struct irq_desc *desc,
                          struct pt_regs *regs)
{
        unsigned int max = 100;

        while (max--) {
                int cascade_irq = i8259_irq(regs);
                if (max == 99)
                        desc->chip->eoi(irq);
                if (cascade_irq < 0)
                        break;
                generic_handle_irq(cascade_irq, regs);
        }
}

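/*
 * Finish MPIC initialization once all ISUs have been set up, locate the
 * 8259 interrupt-acknowledge address in the device tree, bring up the
 * legacy i8259 and chain its cascade into the MPIC.
 */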
static void __init pSeries_init_mpic(void)
{
        unsigned int *addrp;
        struct device_node *np;
        unsigned long intack = 0;

        /* All ISUs are setup, complete initialization */
        mpic_init(pSeries_mpic);

        /* Check what kind of cascade ACK we have */
        if (!(np = of_find_node_by_name(NULL, "pci"))
            || !(addrp = (unsigned int *)
                 get_property(np, "8259-interrupt-acknowledge", NULL)))
                printk(KERN_ERR "Cannot find pci to get ack address\n");
        else
                intack = addrp[prom_n_addr_cells(np)-1];
        of_node_put(np);

        /* Setup the legacy interrupts & controller */
        i8259_init(intack, 0);

        /* Hook cascade to mpic */
        set_irq_chained_handler(NUM_ISA_INTERRUPTS, pSeries_8259_cascade);
}

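/*
 * Locate the MPIC through the "platform-open-pic" property of the device
 * tree root, collect the interrupt sense values from Open Firmware and
 * allocate the MPIC, reserving the last four vectors for IPIs.
 */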
static void __init pSeries_setup_mpic(void)
{
        unsigned int *opprop;
        unsigned long openpic_addr = 0;
        unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS];
        struct device_node *root;
        int irq_count;

        /* Find the Open PIC if present */
        root = of_find_node_by_path("/");
        opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL);
        if (opprop != 0) {
                int n = prom_n_addr_cells(root);

                for (openpic_addr = 0; n > 0; --n)
                        openpic_addr = (openpic_addr << 32) + *opprop++;
                printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
        }
        of_node_put(root);

        BUG_ON(openpic_addr == 0);

        /* Get the sense values from OF */
        prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS);

        /* Setup the openpic driver */
        irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
        pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY,
                                  16, 16, irq_count, /* isu size, irq offset, irq count */
                                  NR_IRQS - 4, /* ipi offset */
                                  senses, irq_count, /* sense & sense size */
                                  " MPIC     ");
}

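/*
 * Enable the performance monitor counters via the H_PERFMON hcall and, on
 * shared-processor LPARs, mark the PMC registers as in use so the
 * hypervisor maintains them for this partition.
 */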
static void pseries_lpar_enable_pmcs(void)
{
        unsigned long set, reset;

        power4_enable_pmcs();

        set = 1UL << 63;
        reset = 0;
        plpar_hcall_norets(H_PERFMON, set, reset);

        /* instruct hypervisor to maintain PMCs */
        if (firmware_has_feature(FW_FEATURE_SPLPAR))
                get_lppaca()->pmcregs_in_use = 1;
}

static void __init pSeries_setup_arch(void)
{
        /* Fixup ppc_md depending on the type of interrupt controller */
        if (ppc64_interrupt_controller == IC_OPEN_PIC) {
                ppc_md.init_IRQ       = pSeries_init_mpic;
                ppc_md.get_irq        = mpic_get_irq;
                /* Allocate the mpic now, so that find_and_init_phbs() can
                 * fill the ISUs */
                pSeries_setup_mpic();
        } else
                ppc_md.init_IRQ       = xics_init_IRQ;

#ifdef CONFIG_SMP
        smp_init_pSeries();
#endif
        /* openpic global configuration register (64-bit format). */
        /* openpic Interrupt Source Unit pointer (64-bit format). */
        /* python0 facility area (mmio) (64-bit format) REAL address. */

        /* init to some ~sane value until calibrate_delay() runs */
        loops_per_jiffy = 50000000;

        if (ROOT_DEV == 0) {
                printk("No ramdisk, default root is /dev/sda2\n");
                ROOT_DEV = Root_SDA2;
        }

        fwnmi_init();

        /* Find and initialize PCI host bridges */
        init_pci_config_tokens();
        find_and_init_phbs();
        eeh_init();

        pSeries_nvram_init();

        /* Choose an idle loop */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                vpa_init(boot_cpuid);
                if (get_lppaca()->shared_proc) {
                        printk(KERN_DEBUG "Using shared processor idle loop\n");
                        ppc_md.power_save = pseries_shared_idle_sleep;
                } else {
                        printk(KERN_DEBUG "Using dedicated idle loop\n");
                        ppc_md.power_save = pseries_dedicated_idle_sleep;
                }
        } else {
                printk(KERN_DEBUG "Using default idle loop\n");
        }

        if (firmware_has_feature(FW_FEATURE_LPAR))
                ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
        else
                ppc_md.enable_pmcs = power4_enable_pmcs;
}

static int __init pSeries_init_panel(void)
{
        /* Manually leave the kernel version on the panel. */
        ppc_md.progress("Linux ppc64\n", 0);
        ppc_md.progress(system_utsname.release, 0);

        return 0;
}
arch_initcall(pSeries_init_panel);

static void __init pSeries_discover_pic(void)
{
        struct device_node *np;
        char *typep;

        /*
         * Setup interrupt mapping options that are needed for finish_device_tree
         * to properly parse the OF interrupt tree & do the virtual irq mapping
         */
        __irq_offset_value = NUM_ISA_INTERRUPTS;
        ppc64_interrupt_controller = IC_INVALID;
        for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) {
                typep = (char *)get_property(np, "compatible", NULL);
                if (strstr(typep, "open-pic")) {
                        ppc64_interrupt_controller = IC_OPEN_PIC;
                        break;
                } else if (strstr(typep, "ppc-xicp")) {
                        ppc64_interrupt_controller = IC_PPC_XIC;
                        break;
                }
        }
        if (ppc64_interrupt_controller == IC_INVALID)
                printk("pSeries_discover_pic: failed to recognize"
                        " interrupt-controller\n");
}

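/*
 * CPU hotplug offline path: tear down the XICS state for this CPU and ask
 * RTAS to stop it.  rtas_stop_self() is not expected to return.
 */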
static void pSeries_mach_cpu_die(void)
{
        local_irq_disable();
        idle_task_exit();
        xics_teardown_cpu(0);
        rtas_stop_self();
        /* Should never get here... */
        BUG();
        for (;;);
}

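/* Program the Data Address Breakpoint Register through the hypervisor */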
static int pseries_set_dabr(unsigned long dabr)
{
        return plpar_hcall_norets(H_SET_DABR, dabr);
}

static int pseries_set_xdabr(unsigned long dabr)
{
        /* We want to catch accesses from kernel and userspace */
        return plpar_hcall_norets(H_SET_XDABR, dabr,
                        H_DABRX_KERNEL | H_DABRX_USER);
}

/*
 * Early initialization.  Relocation is on but do not reference unbolted pages
 */
static void __init pSeries_init_early(void)
{
        DBG(" -> pSeries_init_early()\n");

        fw_feature_init();

        if (firmware_has_feature(FW_FEATURE_LPAR))
                find_udbg_vterm();

        if (firmware_has_feature(FW_FEATURE_DABR))
                ppc_md.set_dabr = pseries_set_dabr;
        else if (firmware_has_feature(FW_FEATURE_XDABR))
                ppc_md.set_dabr = pseries_set_xdabr;

        iommu_init_early_pSeries();

        pSeries_discover_pic();

        DBG(" <- pSeries_init_early()\n");
}

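/*
 * Only report the legacy i8042 and floppy ports as usable if the device
 * tree actually describes the corresponding device node.
 */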
static int pSeries_check_legacy_ioport(unsigned int baseport)
{
        struct device_node *np;

#define I8042_DATA_REG  0x60
#define FDC_BASE        0x3f0

        switch (baseport) {
        case I8042_DATA_REG:
                np = of_find_node_by_type(NULL, "8042");
                if (np == NULL)
                        return -ENODEV;
                of_node_put(np);
                break;
        case FDC_BASE:
                np = of_find_node_by_type(NULL, "fdc");
                if (np == NULL)
                        return -ENODEV;
                of_node_put(np);
                break;
        }
        return 0;
}

/*
 * Called very early, MMU is off, device-tree isn't unflattened
 */
static int __init pSeries_probe_hypertas(unsigned long node,
                                         const char *uname, int depth,
                                         void *data)
{
        if (depth != 1 ||
            (strcmp(uname, "rtas") != 0 && strcmp(uname, "rtas@0") != 0))
                return 0;

        if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL)
                powerpc_firmware_features |= FW_FEATURE_LPAR;

        if (firmware_has_feature(FW_FEATURE_LPAR))
                hpte_init_lpar();
        else
                hpte_init_native();

        return 1;
}

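/*
 * Platform probe: match machines whose device_type is "chrp", skip Cell
 * blades that misreport themselves as chrp, then scan the flattened device
 * tree to find out whether we are running under an LPAR hypervisor.
 */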
static int __init pSeries_probe(void)
{
        unsigned long root = of_get_flat_dt_root();
        char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
                                          "device_type", NULL);
        if (dtype == NULL)
                return 0;
        if (strcmp(dtype, "chrp"))
                return 0;

        /* Cell blades firmware claims to be chrp while it's not. Until this
         * is fixed, we need to avoid those here.
         */
        if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
            of_flat_dt_is_compatible(root, "IBM,CBEA"))
                return 0;

        DBG("pSeries detected, looking for LPAR capability...\n");

        /* Now try to figure out if we are running on LPAR */
        of_scan_flat_dt(pSeries_probe_hypertas, NULL);

        DBG("Machine is%s LPAR !\n",
            (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");

        return 1;
}

DECLARE_PER_CPU(unsigned long, smt_snooze_delay);

static void pseries_dedicated_idle_sleep(void)
{
        unsigned int cpu = smp_processor_id();
        unsigned long start_snooze;
        unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

        /*
         * Indicate to the HV that we are idle. Now would be
         * a good time to find other work to dispatch.
         */
        get_lppaca()->idle = 1;

        /*
         * We come in with interrupts disabled, and need_resched()
         * has been checked recently.  If we should poll for a little
         * while, do so.
         */
        if (*smt_snooze_delay) {
                start_snooze = get_tb() +
                        *smt_snooze_delay * tb_ticks_per_usec;
                local_irq_enable();
                set_thread_flag(TIF_POLLING_NRFLAG);

                while (get_tb() < start_snooze) {
                        if (need_resched() || cpu_is_offline(cpu))
                                goto out;
                        ppc64_runlatch_off();
                        HMT_low();
                        HMT_very_low();
                }

                HMT_medium();
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb();
                local_irq_disable();
                if (need_resched() || cpu_is_offline(cpu))
                        goto out;
        }

        /*
         * Cede if the other thread is not idle, so that it can
         * go single-threaded.  If the other thread is idle,
         * we ask the hypervisor if it has pending work it
         * wants to do and cede if it does.  Otherwise we keep
         * polling in order to reduce interrupt latency.
         *
         * Doing the cede when the other thread is active will
         * result in this thread going dormant, meaning the other
         * thread gets to run in single-threaded (ST) mode, which
         * is slightly faster than SMT mode with this thread at
         * very low priority.  The cede enables interrupts, which
         * doesn't matter here.
         */
        if (!lppaca[cpu ^ 1].idle || poll_pending() == H_PENDING)
                cede_processor();

out:
        HMT_medium();
        get_lppaca()->idle = 0;
}

static void pseries_shared_idle_sleep(void)
{
        /*
         * Indicate to the HV that we are idle. Now would be
         * a good time to find other work to dispatch.
         */
        get_lppaca()->idle = 1;

        /*
         * Yield the processor to the hypervisor.  We return if
         * an external interrupt occurs (which are driven prior
         * to returning here) or if a prod occurs from another
         * processor. When returning here, external interrupts
         * are enabled.
         */
        cede_processor();

        get_lppaca()->idle = 0;
}

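/*
 * Under an LPAR hypervisor the PCI devices we may use are described by the
 * device tree, so probe from there; otherwise fall back to normal config
 * space probing.
 */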
static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
        if (firmware_has_feature(FW_FEATURE_LPAR))
                return PCI_PROBE_DEVTREE;
        return PCI_PROBE_NORMAL;
}

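/*
 * Quiesce this CPU before kexec: deregister its VPA with the hypervisor
 * (skipped when crashing, to avoid hypervisor calls) and tear down its
 * interrupt controller state.
 */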
#ifdef CONFIG_KEXEC
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
        /* Don't risk a hypervisor call if we're crashing */
        if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
                unsigned long vpa = __pa(get_lppaca());

                if (unregister_vpa(hard_smp_processor_id(), vpa)) {
                        printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
                                        "failed\n", smp_processor_id(),
                                        hard_smp_processor_id());
                }
        }

        if (ppc64_interrupt_controller == IC_OPEN_PIC)
                mpic_teardown_this_cpu(secondary);
        else
                xics_teardown_cpu(secondary);
}
#endif

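/*
 * Machine description: hook the pSeries implementations of the platform
 * callbacks so the generic code can drive this machine.
 */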
define_machine(pseries) {
        .name                   = "pSeries",
        .probe                  = pSeries_probe,
        .setup_arch             = pSeries_setup_arch,
        .init_early             = pSeries_init_early,
        .show_cpuinfo           = pSeries_show_cpuinfo,
        .log_error              = pSeries_log_error,
        .pcibios_fixup          = pSeries_final_fixup,
        .pci_probe_mode         = pSeries_pci_probe_mode,
        .irq_bus_setup          = pSeries_irq_bus_setup,
        .restart                = rtas_restart,
        .power_off              = rtas_power_off,
        .halt                   = rtas_halt,
        .panic                  = rtas_os_term,
        .cpu_die                = pSeries_mach_cpu_die,
        .get_boot_time          = rtas_get_boot_time,
        .get_rtc_time           = rtas_get_rtc_time,
        .set_rtc_time           = rtas_set_rtc_time,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = rtas_progress,
        .check_legacy_ioport    = pSeries_check_legacy_ioport,
        .system_reset_exception = pSeries_system_reset_exception,
        .machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
        .kexec_cpu_down         = pseries_kexec_cpu_down,
        .machine_kexec          = default_machine_kexec,
        .machine_kexec_prepare  = default_machine_kexec_prepare,
        .machine_crash_shutdown = default_machine_crash_shutdown,
#endif
};
568 };