/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif
/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)
static struct irqaction *irq_action[NR_IRQS];
/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its accesses such that no locking is needed.
 */
static DEFINE_SPINLOCK(irq_action_lock);
static void register_irq_proc (unsigned int irq);
/*
 * The upper 16 bits of irqaction->flags hold the ino.
 * irqaction->mask holds the smp affinity information.
 */
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	action->flags |= __irq_ino(irq) << 48;

#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	((action)->mask = (smpaff))
#define get_smpaff_in_irqaction(action)		((action)->mask)
int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags;
	int i = *(loff_t *) v;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i < NR_IRQS) {
		if (!(action = *(i + irq_action)))
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j) {
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);

	return 0;
}
extern unsigned long real_hard_smp_processor_id(void);
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}
/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap, cpuid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	preempt_disable();

	/* This gets the physical processor ID, even on uniprocessor,
	 * so we can always program the interrupt target correctly.
	 */
	cpuid = real_hard_smp_processor_id();

	if (tlb_type == hypervisor) {
		unsigned int ino = __irq_ino(irq);
		int err;

		err = sun4v_intr_settarget(ino, cpuid);
		if (err != HV_EOK)
			printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
			       ino, cpuid, err);
		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
		if (err != HV_EOK)
			printk("sun4v_intr_setenabled(%x): err(%d)\n",
			       ino, err);
	} else {
		unsigned int tid = sun4u_compute_tid(imap, cpuid);

		/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
		 * of this SYSIO's preconfigured IGN in the SYSIO Control
		 * Register, the hardware just mirrors that value here.
		 * However for Graphics and UPA Slave devices the full
		 * IMAP_INR field can be set by the programmer here.
		 *
		 * Things like FFB can now be handled via the new IRQ
		 * mechanism.
		 */
		upa_writel(tid | IMAP_VALID, imap);
	}

	preempt_enable();
}
/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;

	imap = bucket->imap;
	if (imap != 0UL) {
		if (tlb_type == hypervisor) {
			unsigned int ino = __irq_ino(irq);
			int err;

			err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
			if (err != HV_EOK)
				printk("sun4v_intr_setenabled(%x): "
				       "err(%d)\n", ino, err);
		} else {
			u32 tmp;

			/* NOTE: We do not want to futz with the IRQ clear registers
			 *       and move the state to IDLE, the SCSI code does call
			 *       disable_irq() to assure atomicity in the queue cmd
			 *       SCSI adapter driver code.  Thus we'd lose interrupts.
			 */
			tmp = upa_readl(imap);
			tmp &= ~IMAP_VALID;
			upa_writel(tmp, imap);
		}
	}
}
static void build_irq_error(const char *msg, unsigned int ino, int inofixup,
			    unsigned long iclr, unsigned long imap,
			    struct ino_bucket *bucket)
{
	prom_printf("%s", msg);
	prom_printf("IRQ: INO %04x (%016lx:%016lx) --> "
		    "(%d:%016lx:%016lx), halting...\n",
		    ino, bucket->iclr, bucket->imap,
		    inofixup, iclr, imap);
	prom_halt();
}
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %016lx %016lx\n",
			    inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if (ino >= NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%016lx:%016lx)\n",
			    ino, inofixup, iclr, imap);
		prom_halt();
	}

	bucket = &ivector_table[ino];
	if (bucket->flags & IBF_ACTIVE)
		build_irq_error("IRQ: Trying to build active INO bucket.\n",
				ino, inofixup, iclr, imap, bucket);

	if (bucket->irq_info) {
		if (bucket->imap != imap || bucket->iclr != iclr)
			build_irq_error("IRQ: Trying to reinit INO bucket.\n",
					ino, inofixup, iclr, imap, bucket);
		goto out;
	}

	bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
	if (!bucket->irq_info) {
		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->flags = 0;

out:
	return __irq(bucket);
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags)
{
	struct ino_bucket *bucket;
	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(devhandle, devino);

	bucket = &ivector_table[sysino];

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 *
	 * But we need to make them look unique for the disable_irq() logic
	 * in free_irq().
	 */
	bucket->imap = ~0UL - sysino;
	bucket->iclr = ~0UL - sysino;

	bucket->flags = flags;

	bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
	if (!bucket->irq_info) {
		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
		prom_halt();
	}

	return __irq(bucket);
}
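/* Link a bucket into this cpu's pending-IRQ worklist with interrupts
 * disabled; the PIL_DEVICE_IRQ softint handler will drain it.
 */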
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id());
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
static int check_irq_sharing(int pil, unsigned long irqflags)
{
	struct irqaction *action;

	action = *(irq_action + pil);
	if (action) {
		if (!(action->flags & SA_SHIRQ) || !(irqflags & SA_SHIRQ))
			return -EBUSY;
	}

	return 0;
}
static void append_irq_action(int pil, struct irqaction *action)
{
	struct irqaction **pp = irq_action + pil;

	while (*pp)
		pp = &((*pp)->next);
	*pp = action;
}
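/* Claim a free slot in the bucket's irq_desc action array; only
 * PCI buckets may hold more than one action.
 */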
static struct irqaction *get_action_slot(struct ino_bucket *bucket)
{
	struct irq_desc *desc = bucket->irq_info;
	int max_irq, i;

	max_irq = 1;
	if (bucket->flags & IBF_PCI)
		max_irq = MAX_IRQ_DESC_ACTION;
	for (i = 0; i < max_irq; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (desc->action_active_mask & mask)
			continue;

		desc->action_active_mask |= mask;
		return p;
	}
	return NULL;
}
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if (unlikely(!handler))
		return -EINVAL;

	if (unlikely(!bucket->irq_info))
		return -ENODEV;

	if (irqflags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.  In SA_STATIC_ALLOC case,
		 * random driver's kmalloc will fail, but it is safe.
		 * If already initialized, random driver will not reinit.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(PIL_DEVICE_IRQ);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	if (check_irq_sharing(PIL_DEVICE_IRQ, irqflags)) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	action = get_action_slot(bucket);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	bucket->flags |= IBF_ACTIVE;
	pending = bucket->pending;
	if (pending)
		bucket->pending = 0;

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	append_irq_action(PIL_DEVICE_IRQ, action);

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << PIL_DEVICE_IRQ);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);

	register_irq_proc(__irq_ino(irq));

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}

EXPORT_SYMBOL(request_irq);
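/* Unlink the action matching dev_id from the shared PIL_DEVICE_IRQ
 * list, returning it for the caller to tear down.
 */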
static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
{
	struct irqaction *action, **pp;

	pp = irq_action + PIL_DEVICE_IRQ;
	action = *pp;
	if (unlikely(!action))
		return NULL;

	if (unlikely(!action->handler)) {
		printk("Freeing free IRQ %d\n", PIL_DEVICE_IRQ);
		return NULL;
	}

	while (action && action->dev_id != dev_id) {
		pp = &action->next;
		action = *pp;
	}

	if (likely(action))
		*pp = action->next;

	return action;
}
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned long flags;
	int ent, i;

	spin_lock_irqsave(&irq_action_lock, flags);

	action = unlink_irq_action(irq, dev_id);

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (unlikely(!action))
		return;

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	bucket = __bucket(irq);
	desc = bucket->irq_info;

	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
		struct irqaction *p = &desc->action[i];

		if (p == action) {
			desc->action_active_mask &= ~(1 << i);
			break;
		}
	}

	if (!desc->action_active_mask) {
		unsigned long imap = bucket->imap;

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for (ent = 0; ent < NUM_IVECS; ent++) {
			struct ino_bucket *bp = &ivector_table[ent];

			if (bp != bucket	&&
			    bp->imap == imap	&&
			    (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);
}

EXPORT_SYMBOL(free_irq);
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);

#if 0
	/* The following is how I wish I could implement this.
	 * Unfortunately the ICLR registers are read-only, you can
	 * only write ICLR_foo values to them.  To get the current
	 * IRQ status you would need to get at the IRQ diag registers
	 * in the PCI/SBUS controller and the layout of those vary
	 * from one controller to the next, sigh... -DaveM
	 */
	unsigned long iclr = bucket->iclr;

	while (1) {
		u32 tmp = upa_readl(iclr);

		if (tmp == ICLR_TRANSMIT ||
		    tmp == ICLR_PENDING) {
			cpu_relax();
			continue;
		}
		break;
	}
#else
	/* So we have to do this with an INPROGRESS bit just like x86. */
	while (bucket->flags & IBF_INPROGRESS)
		cpu_relax();
#endif
}
#endif /* CONFIG_SMP */
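/* Run every active handler attached to a bucket, then return the
 * interrupt source to IDLE state so it can fire again.
 */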
static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
{
	struct irq_desc *desc = bp->irq_info;
	unsigned char flags = bp->flags;
	u32 action_mask, i;
	int random;

	bp->flags |= IBF_INPROGRESS;

	if (unlikely(!(flags & IBF_ACTIVE))) {
		bp->pending = 1;
		goto out;
	}

	if (desc->pre_handler)
		desc->pre_handler(bp,
				  desc->pre_handler_arg1,
				  desc->pre_handler_arg2);

	action_mask = desc->action_active_mask;
	random = 0;
	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (!(action_mask & mask))
			continue;

		action_mask &= ~mask;

		if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
			random |= p->flags;

		if (!action_mask)
			break;
	}

	if (tlb_type == hypervisor) {
		unsigned int ino = __irq_ino(bp);
		int err;

		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk("sun4v_intr_setstate(%x): "
			       "err(%d)\n", ino, err);
	} else {
		upa_writel(ICLR_IDLE, bp->iclr);
	}

	/* Test and add entropy */
	if (random & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(PIL_DEVICE_IRQ);
out:
	bp->flags &= ~IBF_INPROGRESS;
}
extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
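/* Entry point for timer softints: ack the softint, then run
 * timer_interrupt() inside an irq_enter()/irq_exit() pair.
 */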
void timer_irq(int irq, struct pt_regs *regs)
{
	unsigned long clr_mask = 1 << irq;
	unsigned long tick_mask = tick_ops->softint_mask;

	if (get_softint() & tick_mask) {
		irq = 0;
		clr_mask = tick_mask;
	}
	clear_softint(clr_mask);

	irq_enter();
	kstat_this_cpu.irqs[irq]++;
	timer_interrupt(irq, NULL, regs);
	irq_exit();
}
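/* Entry point for device softints: atomically grab this cpu's
 * bucket worklist and process each pending bucket in turn.
 */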
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp;
	int cpu = smp_processor_id();

	/* XXX at this point we should be able to assert that
	 * XXX irq is PIL_DEVICE_IRQ...
	 */
	clear_softint(1 << irq);

	irq_enter();

	bp = __bucket(xchg32(irq_work(cpu), 0));
	while (bp) {
		struct ino_bucket *nbp = __bucket(bp->irq_chain);

		kstat_this_cpu.irqs[bp->virt_irq]++;

		bp->irq_chain = 0;
		process_bucket(bp, regs);
		bp = nbp;
	}
	irq_exit();
}
#ifdef CONFIG_BLK_DEV_FD
extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);

/* XXX No easy way to include asm/floppy.h XXX */
extern unsigned char *pdma_vaddr;
extern unsigned long pdma_size;
extern volatile int doing_pdma;
extern unsigned long fdc_status;
irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	if (likely(doing_pdma)) {
		void __iomem *stat = (void __iomem *) fdc_status;
		unsigned char *vaddr = pdma_vaddr;
		unsigned long size = pdma_size;
		u8 val;

		while (size) {
			val = readb(stat);
			if (unlikely(!(val & 0x80))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				return IRQ_HANDLED;
			}
			if (unlikely(!(val & 0x20))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				doing_pdma = 0;
				goto main_interrupt;
			}
			if (val & 0x40) {
				/* read */
				*vaddr++ = readb(stat + 1);
			} else {
				unsigned char data = *vaddr++;

				/* write */
				writeb(data, stat + 1);
			}
			size--;
		}

		pdma_vaddr = vaddr;
		pdma_size = size;

		/* Send Terminal Count pulse to floppy controller. */
		val = readb(auxio_register);
		val |= AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);
		val &= ~AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);

		doing_pdma = 0;
	}

main_interrupt:
	return floppy_interrupt(irq, dev_cookie, regs);
}
EXPORT_SYMBOL(sparc_floppy_irq);
#endif
/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);

#ifdef CONFIG_SMP
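/* Re-point one irqaction at goal_cpu (or the next online cpu after
 * it), then return the cpu to use for the following irqaction.
 */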
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;

	while (!cpu_online(goal_cpu)) {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	}

	if (tlb_type == hypervisor) {
		unsigned int ino = __irq_ino(bucket);

		sun4v_intr_settarget(ino, goal_cpu);
		sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	} else {
		unsigned long imap = bucket->imap;
		unsigned int tid = sun4u_compute_tid(imap, goal_cpu);

		upa_writel(tid | IMAP_VALID, imap);
	}

	do {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	} while (!cpu_online(goal_cpu));

	return goal_cpu;
}
/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	spin_lock_irqsave(&irq_action_lock, flags);
	cpu = 0;

	for (level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];

		while (p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);
}
#endif
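/* PROM counter-timer support, used only to shut the PROM's timer
 * down before the kernel takes over interrupt handling.
 */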
struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must already be mapped. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if (err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}
void init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist = 0;
}
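/* Tell the hypervisor where one cpu's mondo/error queue lives.  A
 * failed sun4v_cpu_qconf() is unrecoverable, so we halt.
 */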
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
{
	unsigned long num_entries = 128;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}
static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
}
static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
{
	void *page;

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(page);
}
static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
{
	void *page;

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(page);
}
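/* The first 64 bytes of the page hold the cpu mondo block and the
 * cpu list occupies the remainder, hence the BUILD_BUG_ON below.
 */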
static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
{
	void *page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
}
/* Allocate and register the mondo and error queues for this cpu. */
void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
{
	struct trap_per_cpu *tb = &trap_block[cpu];

	if (alloc) {
		alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
		alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
		alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
		alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);

		init_cpu_send_mondo_info(tb, use_bootmem);
	}

	if (load) {
		if (cpu != hard_smp_processor_id()) {
			prom_printf("SUN4V: init mondo on cpu %d not %d\n",
				    cpu, hard_smp_processor_id());
			prom_halt();
		}
		sun4v_register_mondo_queues(cpu);
	}
}
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);

	/* We need to clear any IRQs pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* no outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NUM_IVECS];

#ifdef CONFIG_SMP

static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	struct ino_bucket *bp = ivector_table + (long)data;
	struct irq_desc *desc = bp->irq_info;
	struct irqaction *ap = desc->action;
	cpumask_t mask;
	int len;

	mask = get_smpaff_in_irqaction(ap);
	if (cpus_empty(mask))
		mask = cpu_online_map;

	len = cpumask_scnprintf(page, count, mask);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
{
	struct ino_bucket *bp = ivector_table + irq;
	struct irq_desc *desc = bp->irq_info;
	struct irqaction *ap = desc->action;

	/* Users specify affinity in terms of hw cpu ids.
	 * As soon as we do this, handler_irq() might see and take action.
	 */
	put_smpaff_in_irqaction(ap, hw_aff);

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 */
}
static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
				    unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(new_value, new_value, cpu_online_map);
	if (cpus_empty(new_value))
		return -EINVAL;

	set_intr_affinity(irq, new_value);

	return full_count;
}

#endif
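/* Buffer for the hex ino name: 8 hex digits plus NUL fits in 10. */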
#define MAX_NAMELEN 10
static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%x", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	/* XXX SMP affinity not supported on starfire yet. */
	if (this_is_starfire == 0) {
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}
void init_irq_proc (void)
{
	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
}

#endif /* CONFIG_PROC_FS */