arch/mips/cavium-octeon/octeon-irq.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>

DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
        return cpu_logical_map(cpu);
#else
        return cvmx_get_core_num();
#endif
}
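
/*
 * Note: on SMP kernels the Linux cpu number and the hardware core id
 * are not necessarily the same; cpu_logical_map() supplies the core
 * id that indexes the per-core CIU registers used throughout this
 * file.  On uniprocessor builds only the running core exists, so
 * cvmx_get_core_num() is used directly.
 */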

static void octeon_irq_core_ack(unsigned int irq)
{
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << bit);
        /* The two user interrupts must be cleared manually. */
        if (bit < 2)
                clear_c0_cause(0x100 << bit);
}
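
/*
 * Background on the 0x100 shifts used throughout: the eight CP0
 * Status.IM / Cause.IP bits occupy bits 8..15, so (0x100 << bit)
 * selects IM[bit].  For example, bit 0 (OCTEON_IRQ_SW0) is Status
 * bit 8.  Only IP0/IP1 (bits 0 and 1) are software interrupts, which
 * is why they alone must also be cleared by hand in the Cause
 * register.
 */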

static void octeon_irq_core_eoi(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * If an IRQ is being processed while we are disabling it the
         * handler will attempt to unmask the interrupt after it has
         * been disabled.
         */
        if (desc->status & IRQ_DISABLED)
                return;

        /* There is a race here.  We should fix it.  */

        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        set_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        clear_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
        on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
                    (void *) (long) irq, 1);
#else
        octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
        .name = "Core",
        .enable = octeon_irq_core_enable,
        .disable = octeon_irq_core_disable,
        .ack = octeon_irq_core_ack,
        .eoi = octeon_irq_core_eoi,
};

static void octeon_irq_ciu0_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.
         *
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 2);
}
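
/*
 * Illustrative flow of the mask-on-ack scheme above (a sketch, not a
 * separate code path).  For any CIU0 interrupt the core-local
 * sequence is:
 *
 *	octeon_irq_ciu0_ack(irq);	mask IP2 on this core
 *	... handler runs; the CIU source may remain asserted ...
 *	octeon_irq_ciu0_eoi(irq);	unmask IP2 again
 *
 * Only the per-core Status register is touched, so no cross-core
 * locking is needed; the cost is that CIU interrupts cannot nest.
 */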

static void octeon_irq_ciu0_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en0;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        /*
         * A read lock is used here to make sure only one core is ever
         * updating the CIU enable bits at a time.  During an enable
         * the cores don't interfere with each other.  During a disable
         * the write lock stops any enables that might cause a
         * problem.
         */
        read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        en0 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */
        unsigned long flags;
        uint64_t en0;
        int cpu;
        write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}
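
/*
 * Locking sketch for the rwlock scheme above: enable paths take the
 * lock for reading, so any number of cores may set their own EN0 bit
 * concurrently (each core only touches its own register).  The
 * disable path takes the lock for writing, which excludes all enables
 * while it sweeps the EN0 register of every online core:
 *
 *	enable:  read_lock  -> read-modify-write own CVMX_CIU_INTX_EN0
 *	disable: write_lock -> read-modify-write EN0 for each online cpu
 */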

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * Re-enable the irq on the current core for chips that have the
 * EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        int index;
        int cpu;
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
}
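
/*
 * A minimal sketch of the W1{S,C} idiom the _v2 handlers rely on; the
 * helper below is illustrative only and is not used by this driver.
 * Writing a 1 to a bit of EN0_W1S sets that enable bit and writing a
 * 1 to the same bit of EN0_W1C clears it, while 0 bits are left
 * untouched, so a single store updates one bit with no
 * read-modify-write and therefore no lock.
 */
static inline void octeon_irq_example_en0_bit(int bit, int enable)
{
        int index = cvmx_get_core_num() * 2;

        if (enable)
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), 1ull << bit);
        else
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), 1ull << bit);
}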

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                if (cpumask_test_cpu(cpu, dest))
                        en0 |= 1ull << bit;
                else
                        en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
        return 0;
}
#endif
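
/*
 * In both set_affinity implementations above, the cpumask is realized
 * purely through the per-core enable bits: a CIU line is "routed" to
 * a core by leaving its enable bit set in that core's EN0 register
 * and clearing it everywhere else, e.g. a mask of cpus {0, 2} leaves
 * the bit set only in core 0's and core 2's registers.
 */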

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_ack,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static void octeon_irq_ciu1_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.  We don't
         * need to disable IRQs to make these atomic since they are
         * already disabled earlier in the low level interrupt code.
         */
        clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en1;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        /*
         * A read lock is used here to make sure only one core is ever
         * updating the CIU enable bits at a time.  During an enable
         * the cores don't interfere with each other.  During a disable
         * the write lock stops any enables that might cause a
         * problem.
         */
        read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        en1 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */
        unsigned long flags;
        uint64_t en1;
        int cpu;
        write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Re-enable the irq on the current core for chips that have the
 * EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        int index;
        int cpu;
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                if (cpumask_test_cpu(cpu, dest))
                        en1 |= 1ull << bit;
                else
                        en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable_v2,
        .disable = octeon_irq_ciu1_disable_all_v2,
        .ack = octeon_irq_ciu1_ack_v2,
        .eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable,
        .disable = octeon_irq_ciu1_disable,
        .ack = octeon_irq_ciu1_ack,
        .eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI

static void octeon_irq_msi_ack(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* These chips have PCI */
                cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        } else {
                /*
                 * These chips have PCIe.  Thankfully the ACK doesn't
                 * need any locking.
                 */
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        }
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
        /* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /*
                 * Octeon PCI doesn't have the ability to mask/unmask
                 * MSI interrupts individually.  Instead of
                 * masking/unmasking them in groups of 16, we simply
                 * assume MSI devices are well behaved.  MSI
                 * interrupts are always enabled and the ACK is
                 * assumed to be enough.
                 */
        } else {
                /*
                 * These chips have PCIe.  Note that we only support
                 * the first 64 MSI interrupts.  Unfortunately all the
                 * MSI enables are in the same register.  We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;
                spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static void octeon_irq_msi_disable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* See comment in enable */
        } else {
                /*
                 * These chips have PCIe.  Note that we only support
                 * the first 64 MSI interrupts.  Unfortunately all the
                 * MSI enables are in the same register.  We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;
                spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}
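
/*
 * Note on the cvmx_read_csr() of NPEI_MSI_ENB0 after the final write
 * (and the matching reads in the CIU paths above): the read-back
 * serves as a flush, so the enable update is known to have reached
 * the hardware before the lock is released.
 */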

static struct irq_chip octeon_irq_chip_msi = {
        .name = "MSI",
        .enable = octeon_irq_msi_enable,
        .disable = octeon_irq_msi_disable,
        .ack = octeon_irq_msi_ack,
        .eoi = octeon_irq_msi_eoi,
};
#endif

void __init arch_init_irq(void)
{
        int irq;
        struct irq_chip *chip0;
        struct irq_chip *chip1;

#ifdef CONFIG_SMP
        /* Set the default affinity to the boot cpu. */
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

        if (NR_IRQS < OCTEON_IRQ_LAST)
                pr_err("octeon_irq_init: NR_IRQS is set too low\n");

        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
                chip0 = &octeon_irq_chip_ciu0_v2;
                chip1 = &octeon_irq_chip_ciu1_v2;
        } else {
                chip0 = &octeon_irq_chip_ciu0;
                chip1 = &octeon_irq_chip_ciu1;
        }

        /* 0 - 15 reserved for i8259 master and slave controller. */

        /* 16 - 23 MIPS internal */
        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
                                         handle_percpu_irq);
        }

        /* 24 - 87 CIU_INT_SUM0 */
        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
        }

        /* 88 - 151 CIU_INT_SUM1 */
        for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
                set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
        }

#ifdef CONFIG_PCI_MSI
        /* 152 - 215 PCI/PCIe MSI interrupts */
        for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
                                         handle_percpu_irq);
        }
#endif
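        /*
         * Unmask the two CIU summary lines at the CP0 level:
         * 0x300 << 2 sets IM2 and IM3 (Status bits 10 and 11), which
         * carry CIU0 and CIU1 respectively.
         */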
        set_c0_status(0x300 << 2);
}

asmlinkage void plat_irq_dispatch(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
        const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
        const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
        const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
        unsigned long cop0_cause;
        unsigned long cop0_status;
        uint64_t ciu_en;
        uint64_t ciu_sum;

        while (1) {
                cop0_cause = read_c0_cause();
                cop0_status = read_c0_status();
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;

                if (unlikely(cop0_cause & STATUSF_IP2)) {
                        ciu_sum = cvmx_read_csr(ciu_sum0_address);
                        ciu_en = cvmx_read_csr(ciu_en0_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
                        else
                                spurious_interrupt();
                } else if (unlikely(cop0_cause & STATUSF_IP3)) {
                        ciu_sum = cvmx_read_csr(ciu_sum1_address);
                        ciu_en = cvmx_read_csr(ciu_en1_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
                        else
                                spurious_interrupt();
                } else if (likely(cop0_cause)) {
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                } else {
                        break;
                }
        }
}
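
/*
 * Worked example of the decode above: if the EN0-masked SUM0 value
 * has bit 5 as its highest set bit, fls64() returns 6 and the
 * dispatched irq is OCTEON_IRQ_WORKQ0 + 5, i.e. the irq bound to CIU0
 * bit 5.  The highest-numbered pending source is serviced first, and
 * the loop repeats until no enabled source remains pending.
 */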

#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
        unsigned int isset;
        int coreid = octeon_coreid_for_cpu(cpu);
        int bit = (irq < OCTEON_IRQ_WDOG0) ?
                irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

        if (irq < OCTEON_IRQ_WDOG0) {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
                        (1ull << bit)) >> bit;
        } else {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
                        (1ull << bit)) >> bit;
        }
        return isset;
}

void fixup_irqs(void)
{
        int irq;

        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
                octeon_irq_core_disable_local(irq);

        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

#if 0
        for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
                octeon_irq_mailbox_mask(irq);
#endif
        for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

        for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu1.disable(irq);
                        octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
                }
        }
}

#endif /* CONFIG_HOTPLUG_CPU */