/*
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>

#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/system.h>

#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#include <asm/sibyte/sb1250_uart.h>
#include <asm/sibyte/sb1250_scd.h>
#include <asm/sibyte/sb1250.h>

/*
 * These are the routines that handle all the low level interrupt stuff.
 * Actions handled here are: initialization of the interrupt map, requesting of
 * interrupt lines by handlers, dispatching of interrupts to handlers, probing
 * for interrupt lines.
 */

static void end_sb1250_irq(unsigned int irq);
static void enable_sb1250_irq(unsigned int irq);
static void disable_sb1250_irq(unsigned int irq);
static void ack_sb1250_irq(unsigned int irq);
#ifdef CONFIG_SMP
static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
#endif

#ifdef CONFIG_SIBYTE_HAS_LDT
extern unsigned long ldt_eoi_space;
#endif

static struct irq_chip sb1250_irq_type = {
	.ack = ack_sb1250_irq,
	.mask = disable_sb1250_irq,
	.mask_ack = ack_sb1250_irq,
	.unmask = enable_sb1250_irq,
	.end = end_sb1250_irq,
#ifdef CONFIG_SMP
	.set_affinity = sb1250_set_affinity
#endif
};

/* Store the CPU id (not the logical number) */
int sb1250_irq_owner[SB1250_NR_IRQS];

DEFINE_SPINLOCK(sb1250_imr_lock);
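
/*
 * Set or clear one source's bit in a CPU's interrupt mask register (IMR);
 * a set bit holds off delivery of that source to that CPU.
 */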
void sb1250_mask_irq(int cpu, int irq)
{
	unsigned long flags;
	u64 cur_ints;

	spin_lock_irqsave(&sb1250_imr_lock, flags);
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	cur_ints |= (((u64) 1) << irq);
	____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	spin_unlock_irqrestore(&sb1250_imr_lock, flags);
}

void sb1250_unmask_irq(int cpu, int irq)
{
	unsigned long flags;
	u64 cur_ints;

	spin_lock_irqsave(&sb1250_imr_lock, flags);
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	cur_ints &= ~(((u64) 1) << irq);
	____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	spin_unlock_irqrestore(&sb1250_imr_lock, flags);
}
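
/*
 * Re-route an interrupt to a different CPU: mask the source in the old
 * owner's IMR, record the new owner, and (if it was enabled) unmask it in
 * the new owner's IMR.  The IP selection in the map register is untouched.
 */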
#ifdef CONFIG_SMP
static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	int i = 0, old_cpu, cpu, int_on;
	u64 cur_ints;
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	i = cpumask_first(mask);

	if (cpumask_weight(mask) > 1) {
		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
		return -1;
	}

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	spin_lock_irqsave(&desc->lock, flags);
	spin_lock(&sb1250_imr_lock);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = sb1250_irq_owner[irq];
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(old_cpu) +
					R_IMR_INTERRUPT_MASK));
	int_on = !(cur_ints & (((u64) 1) << irq));
	if (int_on) {
		/* If it was on, mask it */
		cur_ints |= (((u64) 1) << irq);
		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(old_cpu) +
						R_IMR_INTERRUPT_MASK));
	}
	sb1250_irq_owner[irq] = cpu;
	if (int_on) {
		/* unmask for the new CPU */
		cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
						R_IMR_INTERRUPT_MASK));
		cur_ints &= ~(((u64) 1) << irq);
		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
						R_IMR_INTERRUPT_MASK));
	}
	spin_unlock(&sb1250_imr_lock);
	spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
#endif

/*****************************************************************************/

static void disable_sb1250_irq(unsigned int irq)
{
	sb1250_mask_irq(sb1250_irq_owner[irq], irq);
}

static void enable_sb1250_irq(unsigned int irq)
{
	sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
}
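
/*
 * Acknowledge an interrupt: clear any pending LDT (HyperTransport) status
 * for the source, then mask it in the owning CPU's IMR until
 * end_sb1250_irq() re-enables it.
 */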
static void ack_sb1250_irq(unsigned int irq)
{
#ifdef CONFIG_SIBYTE_HAS_LDT
	u64 pending;

	/*
	 * If the interrupt was an HT interrupt, now is the time to
	 * clear it.  NOTE: we assume the HT bridge was set up to
	 * deliver the interrupts to all CPUs (which makes affinity
	 * changing easier for us)
	 */
	pending = __raw_readq(IOADDR(A_IMR_REGISTER(sb1250_irq_owner[irq],
						    R_IMR_LDT_INTERRUPT)));
	pending &= ((u64)1 << (irq));
	if (pending) {
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			int cpu;
#ifdef CONFIG_SMP
			cpu = cpu_logical_map(i);
#else
			cpu = i;
#endif
			/*
			 * Clear for all CPUs so an affinity switch
			 * doesn't find an old status
			 */
			__raw_writeq(pending,
				     IOADDR(A_IMR_REGISTER(cpu,
						R_IMR_LDT_INTERRUPT_CLR)));
		}

		/*
		 * Generate EOI.  For Pass 1 parts, EOI is a nop.  For
		 * Pass 2, the LDT world may be edge-triggered, but
		 * this EOI shouldn't hurt.  If they are
		 * level-sensitive, the EOI is required.
		 */
		*(uint32_t *)(ldt_eoi_space+(irq<<16)+(7<<2)) = 0;
	}
#endif
	sb1250_mask_irq(sb1250_irq_owner[irq], irq);
}

static void end_sb1250_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
	}
}
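
/*
 * Hook every mapper source into the generic IRQ layer as a level-triggered
 * interrupt, initially owned by CPU 0.
 */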
void __init init_sb1250_irqs(void)
{
	int i;

	for (i = 0; i < SB1250_NR_IRQS; i++) {
		set_irq_chip_and_handler(i, &sb1250_irq_type, handle_level_irq);
		sb1250_irq_owner[i] = 0;
	}
}

/*
 * arch_init_irq is called early in the boot sequence from init/main.c via
 * init_IRQ.  It is responsible for setting up the interrupt mapper and
 * installing the handler that will be responsible for dispatching interrupts
 * to the "right" place.
 */
/*
 * For now, map all interrupts to IP[2].  We could save
 * some cycles by parceling out system interrupts to different
 * IP lines, but keep it simple for bringup.  We'll also direct
 * all interrupts to a single CPU; we should probably route
 * PCI and LDT to one cpu and everything else to the other
 * to balance the load a bit.
 *
 * On the second cpu, everything is set to IP5, which is
 * ignored, EXCEPT the mailbox interrupt.  That one is
 * set to IP[2] so it is handled.  This is needed so we
 * can do cross-cpu function calls, as required by SMP.
 */

#define IMR_IP2_VAL	K_INT_MAP_I0
#define IMR_IP3_VAL	K_INT_MAP_I1
#define IMR_IP4_VAL	K_INT_MAP_I2
#define IMR_IP5_VAL	K_INT_MAP_I3
#define IMR_IP6_VAL	K_INT_MAP_I4
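
/*
 * Each interrupt source gets its own map register, spaced 8 bytes apart
 * from R_IMR_INTERRUPT_MAP_BASE (hence the "source << 3" offsets below);
 * writing K_INT_MAP_I0/I1 there steers the source to IP[2]/IP[3].
 */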

void __init arch_init_irq(void)
{
	unsigned int i;
	u64 tmp;
	unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
		STATUSF_IP1 | STATUSF_IP0;

	/* Default everything to IP2 */
	for (i = 0; i < SB1250_NR_IRQS; i++) {	/* was I0 */
		__raw_writeq(IMR_IP2_VAL,
			     IOADDR(A_IMR_REGISTER(0,
						   R_IMR_INTERRUPT_MAP_BASE) +
				    (i << 3)));
		__raw_writeq(IMR_IP2_VAL,
			     IOADDR(A_IMR_REGISTER(1,
						   R_IMR_INTERRUPT_MAP_BASE) +
				    (i << 3)));
	}

	init_sb1250_irqs();

	/*
	 * Map the high 16 bits of the mailbox registers to IP[3], for
	 * inter-cpu messages
	 */
	__raw_writeq(IMR_IP3_VAL,
		     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
			    (K_INT_MBOX_0 << 3)));
	__raw_writeq(IMR_IP3_VAL,
		     IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MAP_BASE) +
			    (K_INT_MBOX_0 << 3)));

	/* Clear the mailboxes.  The firmware may leave them dirty */
	__raw_writeq(0xffffffffffffffffULL,
		     IOADDR(A_IMR_REGISTER(0, R_IMR_MAILBOX_CLR_CPU)));
	__raw_writeq(0xffffffffffffffffULL,
		     IOADDR(A_IMR_REGISTER(1, R_IMR_MAILBOX_CLR_CPU)));

	/* Mask everything except the mailbox registers for both cpus */
	tmp = ~((u64) 0) ^ (((u64) 1) << K_INT_MBOX_0);
	__raw_writeq(tmp, IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MASK)));
	__raw_writeq(tmp, IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MASK)));

	/*
	 * Note that the timer interrupts are also mapped, but this is
	 * done in sb1250_time_init().  Also, the profiling driver
	 * does its own management of IP7.
	 */

	/* Enable necessary IPs, disable the rest */
	change_c0_status(ST0_IM, imask);
}

extern void sb1250_mailbox_interrupt(void);
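
/*
 * IP[2] fan-out: read this CPU's interrupt status register and hand the
 * highest-numbered pending source to do_IRQ().
 */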
static inline void dispatch_ip2(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long long mask;

	/*
	 * Default...we've hit an IP[2] interrupt, which means we've got to
	 * check the 1250 interrupt registers to figure out what to do.  Need
	 * to detect which CPU we're on, now that smp_affinity is supported.
	 */
	mask = __raw_readq(IOADDR(A_IMR_REGISTER(cpu,
				  R_IMR_INTERRUPT_STATUS_BASE)));
	if (mask)
		do_IRQ(fls64(mask) - 1);
}
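
/*
 * Top-level dispatch: the CP0 cause bits are checked in fixed priority
 * order, so the performance counter (IP7) beats the timer (IP4), the
 * mailbox (IP3) and the interrupt mapper (IP2).
 */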
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int pending;

	/*
	 * What a pain. We have to be really careful saving the upper 32 bits
	 * of any register across function calls if we don't want them
	 * trashed--since we're running in -o32, the calling routine never saves
	 * the full 64 bits of a register across a function call.  Being the
	 * interrupt handler, we're guaranteed that interrupts are disabled
	 * during this code so we don't have to worry about random interrupts
	 * blasting the high 32 bits.
	 */

	pending = read_c0_cause() & read_c0_status() & ST0_IM;

	if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */
		do_IRQ(MIPS_CPU_IRQ_BASE + 7);
	else if (pending & CAUSEF_IP4)
		do_IRQ(K_INT_TIMER_0 + cpu);	/* sb1250_timer_interrupt() */
#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		sb1250_mailbox_interrupt();
#endif
	else if (pending & CAUSEF_IP2)
		dispatch_ip2();
	else
		spurious_interrupt();
}