/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);
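
/* Entry 0 of virt_irq_table is deliberately never handed out:
 * virt_irq_alloc() below scans from index 1 and returns 0 on
 * exhaustion, so a zero virtual IRQ doubles as the "no IRQ" sentinel.
 */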

unsigned char virt_irq_alloc(unsigned int dev_handle,
			     unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}
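
/* Illustrative usage sketch (not part of this file): a bus driver
 * allocates a virtual IRQ for a (devhandle, devino) pair and, on
 * PCI MSI configurations, releases it again on teardown:
 *
 *	unsigned char virt_irq = virt_irq_alloc(devhandle, devino);
 *	if (!virt_irq)
 *		return -ENOMEM;
 *	...
 *	virt_irq_free(virt_irq);
 */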

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_irq_table[virt_irq].in_use = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->name);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
		seq_printf(p, "     Non-maskable interrupts\n");
	}
	return 0;
}

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}
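
/* Worked example for the Safari (Cheetah, non-JBUS) encoding above,
 * purely illustrative: cpuid 37 (0x25) splits into agent ID
 * a = 37 & 0x1f = 5 and node ID n = (37 >> 5) & 0x1f = 1, so the
 * target ID becomes (5 << IMAP_AID_SHIFT) | (1 << IMAP_NID_SHIFT),
 * masked down to the AID/NID fields of the IMAP register.
 */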

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpus_equal(mask, cpu_online_map)) {
		cpuid = map_to_cpu(virt_irq);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);
		cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(virt_irq, affinity) \
	real_hard_smp_processor_id()
#endif

static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq,
				       irq_desc[virt_irq].affinity);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}
}

static int sun4u_set_affinity(unsigned int virt_irq,
			      const struct cpumask *mask)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq, mask);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(unsigned int virt_irq)
{
}

static void sun4u_irq_eoi(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(data))
		upa_writeq(ICLR_IDLE, data->iclr);
}

static void sun4v_irq_enable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq,
					     irq_desc[virt_irq].affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(unsigned int virt_irq,
			      const struct cpumask *mask)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	struct irq_desc *desc = irq_desc + virt_irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(unsigned int virt_irq)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(unsigned int virt_irq,
				   const struct cpumask *mask)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq, mask);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(unsigned int virt_irq)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name		= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.eoi		= sun4u_irq_eoi,
	.set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.name		= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.eoi		= sun4v_irq_eoi,
	.set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
	.name		= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.eoi		= sun4v_virq_eoi,
	.set_affinity	= sun4v_virt_set_affinity,
};

static void pre_flow_handler(unsigned int virt_irq,
			     struct irq_desc *desc)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	data->pre_handler(ino, data->arg1, data->arg2);

	handle_fasteoi_irq(virt_irq, desc);
}

void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	data->pre_handler = func;
	data->arg1 = arg1;
	data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, ino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	data->imap = imap;
	data->iclr = iclr;

out:
	return virt_irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, sysino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is
	 * by physical address which kmemleak can't see, tell
	 * it that this object explicitly is not a leak and
	 * should be scanned.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(devhandle, devino);
	bucket_set_virt_irq(__pa(bucket), virt_irq);

	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + virt_irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;
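
	/* The complement below makes the cookie a negative value, since
	 * physical addresses never have the top bit set; the low-level
	 * device mondo handler can use that sign bit to distinguish a
	 * cookie-carrying virtual interrupt from a plain sysino.
	 */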
	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return virt_irq;
}

void ack_bad_irq(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

static __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}

static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}
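
/* The arithmetic above positions the new stack pointer at the top of
 * the IRQ stack region: THREAD_SIZE bytes up, minus 192 bytes for the
 * minimal sparc64 stack frame (register window save area plus argument
 * slots), minus STACK_BIAS, the constant 2047-byte offset the 64-bit
 * SPARC ABI applies to %sp.
 */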

void __irq_entry handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + virt_irq;

		if (!(desc->status & IRQ_DISABLED))
			desc->handle_irq(virt_irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					irq_desc[irq].affinity);
		}
		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must have mapped it. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as on sun4c/sun4m, the PROM uses a timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}
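
/* Worked example, purely illustrative: a queue with qmask 0x7ff
 * describes a 0x800-byte (2KB) region, i.e. 2048 / 64 = 32 mondo
 * entries in register_one_mondo() above; allocating whole pages via
 * __get_free_pages() naturally satisfies the alignment rule, since a
 * 2^order-page block is aligned to its own (page-rounded) size.
 */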

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* no outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}