/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel on an irq:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts.  Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays.  The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,	/* must be zero: irq_info[] starts zeroed */
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};
/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EIO"
 *    VIRQ - virq number
 *    IPI - IPI vector
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned short vector;
		} pirq;
	} u;
};
static struct irq_info irq_info[NR_IRQS];

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};

struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static struct cpu_evtchn_s *cpu_evtchn_mask_p;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
/* Constructors for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.u.pirq = { .gsi = gsi, .vector = vector } };
}
/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}
static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
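
/*
 * Illustrative sketch (not part of the original file): how a port number
 * decomposes into the word/bit pair that active_evtchns() operates on.
 * The helper name is hypothetical.
 */
#if 0
static bool example_port_is_active(unsigned int cpu, int port)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int word_idx = port / BITS_PER_LONG;	/* which bitmap word */
	int bit_idx = port % BITS_PER_LONG;	/* which bit in that word */

	/* Active means: pending, bound to this cpu, and not masked. */
	return (active_evtchns(cpu, sh, word_idx) >> bit_idx) & 1;
}
#endif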
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	/* Set every bit of cpu 0's mask; sizeof(cpu_evtchn_mask(0)) would
	   only be the size of a pointer, not the bitmap. */
	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}
static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}
/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore.  Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}
static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'.  Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}
static int find_unbound_irq(void)
{
	int irq;
	struct irq_desc *desc;

	for (irq = 0; irq < nr_irqs; irq++)
		if (irq_info[irq].type == IRQT_UNBOUND)
			break;

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	desc = irq_to_desc_alloc_cpu(irq, 0);
	if (WARN_ON(desc == NULL))
		return -1;

	dynamic_irq_init(irq);

	return irq;
}
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

 out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = mk_unbound_info();

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
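
/*
 * Illustrative sketch (not part of the original file): how a front-end
 * driver typically consumes the API above.  The handler, device name and
 * event-channel argument are hypothetical.
 */
#if 0
static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	/* Consume the event for this device. */
	return IRQ_HANDLED;
}

static int example_connect(unsigned int remote_evtchn)
{
	/* Maps the event channel to an irq and installs the handler;
	   returns the irq, or a negative errno from request_irq(). */
	return bind_evtchn_to_irqhandler(remote_evtchn, example_interrupt,
					 0, "example-device", NULL);
}
#endif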
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
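
/*
 * Illustrative sketch (not part of the original file): per-cpu VIRQ use,
 * modelled on the timer binding done elsewhere in the Xen code.  The
 * handler and flag choice are hypothetical; VIRQ_TIMER is a real virq.
 */
#if 0
static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static void example_setup_timer(unsigned int cpu)
{
	/* VIRQs are per-cpu events, so each cpu gets its own binding. */
	int irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu,
					  example_timer_interrupt,
					  IRQF_DISABLED | IRQF_PERCPU,
					  "timer", NULL);
	BUG_ON(irq < 0);
}
#endif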
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];

	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
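
/*
 * Illustrative note (not part of the original file): smp code sends a
 * cross-cpu reschedule with, e.g., xen_send_IPI_one(cpu,
 * XEN_RESCHEDULE_VECTOR), relying on the per-cpu ipi_to_irq binding
 * established by bind_ipi_to_irq().
 */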
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long flags;
	int i;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
		       (get_irq_regs() && i == cpu)
		       ? xen_irqs_disabled(get_irq_regs())
		       : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}

	printk("pending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nmasks:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nunmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk("  %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}
static void xen_do_irq(unsigned irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (WARN_ON(irq == -1))
		return;

	exit_idle();
	irq_enter();

	handle_irq(irq, regs);

	irq_exit();

	set_irq_regs(old_regs);
}
/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
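/*
 * Worked example (illustrative, not part of the original comment): with
 * 32-bit words, bit 5 set in evtchn_pending_sel selects word_idx 5 of
 * evtchn_pending[]; if bit 7 of that word is pending, unmasked and bound
 * to this cpu, the port is 5 * 32 + 7 = 167, and evtchn_to_irq[167]
 * names the irq that gets handled.
 */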
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	static DEFINE_PER_CPU(unsigned, nesting_count);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];

				xen_do_irq(irq, regs);
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(nesting_count);
		__get_cpu_var(nesting_count) = 0;
	} while (count != 1);

out:
	put_cpu();
}
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out. */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type. */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* New event channels are always bound to cpu 0. */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu. */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	/* Send future instances of this interrupt to the other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound.  Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);
}
static void set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);
	rebind_irq_to_cpu(irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	/* Mark the port pending while it is masked; if it was unmasked
	   before, unmask_evtchn() will re-raise the upcall for it. */
	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
/* Clear an irq's pending state, in preparation for polling on it. */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}
/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
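
/*
 * Illustrative sketch (not part of the original file): the clear/poll
 * pattern the helpers above support, as used by paravirtual
 * spinlock-style waiters.  The irq is hypothetical and assumed to be
 * bound but disabled, so the event is only ever observed by polling.
 */
#if 0
static void example_wait_for_event(int irq)
{
	/* Drop any stale pending state before blocking. */
	xen_clear_irq_pending(irq);

	/* Block in the hypervisor until the event becomes pending;
	   loop in case of a spurious wakeup. */
	do {
		xen_poll_irq(irq);
	} while (!xen_test_irq_pending(irq));

	/* Consume the event so the next wait starts clean. */
	xen_clear_irq_pending(irq);
}
#endif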
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",
	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,
	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};
void __init xen_init_IRQ(void)
{
	int i;
	size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);

	cpu_evtchn_mask_p = alloc_bootmem(size);
	BUG_ON(cpu_evtchn_mask_p == NULL);

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	irq_ctx_init(smp_processor_id());
}