/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>
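/*
 * Locking overview: the two CIU rwlocks are used in an inverted sense.
 * Readers are cores doing read-modify-write updates of their own
 * enable register; the writer is whichever core must update the enable
 * registers of every core (disable and set_affinity).  The MSI lock
 * serializes access to the single shared MSI enable register.
 */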
DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}
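/*
 * The per-core IRQs (OCTEON_IRQ_SW0 ... OCTEON_IRQ_TIMER) are routed
 * through the CP0 Status/Cause IM bits; bit 8 corresponds to
 * OCTEON_IRQ_SW0, which is why the handlers below operate on
 * 0x100 << bit.
 */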
static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}
static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (desc->status & IRQ_DISABLED)
		return;
	/* There is a race here.  We should fix it.  */
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}
static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}
static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}
static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};
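/*
 * CIU0 interrupts are signalled to the core on interrupt line IP2 and
 * CIU1 interrupts on IP3, hence the 0x100 << 2 and 0x100 << 3 masks
 * used by the ack/eoi handlers below and by plat_irq_dispatch().
 */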
static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}
static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}
static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time.  During an enable
	 * the cores don't interfere with each other.  During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}
static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}
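/*
 * The _v2 variants below use the EN*_W1S ("write one to set") and
 * EN*_W1C ("write one to clear") alias registers of newer chips: a
 * single store flips only the bits written as 1, so no
 * read-modify-write cycle, and therefore no locking, is needed.
 */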
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}
/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);

	return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
	return 0;
}
#endif
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
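/*
 * CIU1 mirrors the CIU0 handling above, using the EN1 registers
 * (indexed by coreid * 2 + 1), bit offsets relative to
 * OCTEON_IRQ_WDOG0, and CPU interrupt line IP3.
 */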
static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.  We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}
static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time.  During an enable
	 * the cores don't interfere with each other.  During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}
static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}
/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);

	return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
	return 0;
}
#endif
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.ack = octeon_irq_ciu1_ack_v2,
	.eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};
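/*
 * MSI handling differs by chip family: PCI-host chips acknowledge
 * through CVMX_NPI_NPI_MSI_RCV and cannot mask individual vectors,
 * while PCIe chips use the NPEI registers and share one 64-bit enable
 * register among all vectors, guarded by octeon_irq_msi_lock.
 */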
#ifdef CONFIG_PCI_MSI

static void octeon_irq_msi_ack(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* These chips have PCI */
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	} else {
		/*
		 * These chips have PCIe.  Thankfully the ACK doesn't
		 * need any locking.
		 */
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	}
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
	/* Nothing needed */
}
529 if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
531 * Octeon PCI doesn't have the ability to mask/unmask
532 * MSI interrupts individually. Instead of
533 * masking/unmasking them in groups of 16, we simple
534 * assume MSI devices are well behaved. MSI
535 * interrupts are always enable and the ACK is assumed
539 /* These chips have PCIe. Note that we only support
540 * the first 64 MSI interrupts. Unfortunately all the
541 * MSI enables are in the same register. We use
542 * MSI0's lock to control access to them all.
546 spin_lock_irqsave(&octeon_irq_msi_lock, flags);
547 en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
548 en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
549 cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
550 cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
551 spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See comment in enable */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;

		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}
static struct irq_chip octeon_irq_chip_msi = {
	.name = "MSI",
	.enable = octeon_irq_msi_enable,
	.disable = octeon_irq_msi_disable,
	.ack = octeon_irq_msi_ack,
	.eoi = octeon_irq_msi_eoi,
};
#endif /* CONFIG_PCI_MSI */
void __init arch_init_irq(void)
{
	int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip1;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
	} else {
		chip0 = &octeon_irq_chip_ciu0;
		chip1 = &octeon_irq_chip_ciu1;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
	}

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif

	/* Unmask the CIU lines (IP2 and IP3) in the core. */
	set_c0_status(0x300 << 2);
}
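/*
 * The low level dispatcher: loop until no enabled interrupt source
 * remains pending.  Pending CIU bits are masked with the core's
 * enable bits, and fls64() picks the highest-numbered pending bit, so
 * bit N of SUM0 maps to OCTEON_IRQ_WORKQ0 + N and bit N of SUM1 to
 * OCTEON_IRQ_WDOG0 + N.
 */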
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}
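/*
 * CPU hotplug support: before a CPU is taken offline, fixup_irqs()
 * disables the CIU interrupts currently bound to it and respreads
 * them across the remaining online CPUs via the set_affinity
 * handlers.
 */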
#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
	int coreid = octeon_coreid_for_cpu(cpu);
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

	if (irq < OCTEON_IRQ_WDOG0) {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	} else {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			(1ull << bit)) >> bit;
	}
	return isset;
}
void fixup_irqs(void)
{
	int irq;

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

#if 0
	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
		octeon_irq_mailbox_mask(irq);
#endif

	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu1.disable(irq);
			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
		}
	}
}
#endif /* CONFIG_HOTPLUG_CPU */