/* Interrupt types. */
enum xen_irq_type {
- IRQT_UNBOUND,
+ IRQT_UNBOUND = 0,
IRQT_PIRQ,
IRQT_VIRQ,
IRQT_IPI,
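
Note: pinning IRQT_UNBOUND to 0 is what lets the rest of this series lean on
zero-filled storage: a static or kcalloc'd irq_info[] array comes up "all
unbound" with no initialization pass. A throwaway userspace sketch (demo-only
names, not from the patch):

#include <stdio.h>

enum xen_irq_type { IRQT_UNBOUND = 0, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI };

struct demo_info { enum xen_irq_type type; };

static struct demo_info table[4];  /* static storage is zero-filled */

int main(void)
{
        /* Every slot reads as IRQT_UNBOUND without any setup loop. */
        printf("slot 0 unbound: %d\n", table[0].type == IRQT_UNBOUND);
        return 0;
}
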
return cpu_evtchn_mask_p[cpu].bits;
}
-/* Reference counts for bindings to IRQs. */
-static int irq_bindcount[NR_IRQS];
-
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)
static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
- return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn };
+ return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
+ .cpu = 0 };
}
static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
- .u.ipi = ipi };
+ .cpu = 0, .u.ipi = ipi };
}
static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
- .u.virq = virq };
+ .cpu = 0, .u.virq = virq };
}
static struct irq_info mk_pirq_info(unsigned short evtchn,
unsigned short gsi, unsigned short vector)
{
return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
- .u.pirq = { .gsi = gsi, .vector = vector } };
+ .cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}
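
Note: the added ".cpu = 0" members do not change behaviour; C99 designated
initializers already zero any member not named (C99 6.7.8p21), so this only
spells out that new bindings start on CPU 0. A minimal userspace sketch
(hypothetical demo_irq_info struct) showing the equivalence:

#include <stdio.h>

struct demo_irq_info {
        int type;
        unsigned evtchn;
        unsigned cpu;
};

int main(void)
{
        /* Members omitted from the initializer are zeroed, so these
         * two objects are identical. */
        struct demo_irq_info implicit = { .type = 1, .evtchn = 5 };
        struct demo_irq_info explicit = { .type = 1, .evtchn = 5, .cpu = 0 };

        printf("%u %u\n", implicit.cpu, explicit.cpu);  /* prints "0 0" */
        return 0;
}
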
return info_for_irq(irq)->evtchn;
}
+unsigned irq_from_evtchn(unsigned int evtchn)
+{
+ return evtchn_to_irq[evtchn];
+}
+EXPORT_SYMBOL_GPL(irq_from_evtchn);
+
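
Note: the export gives modules a way to translate an event-channel port back
to the Linux irq it is bound to; the /dev/xen/evtchn driver is the obvious
consumer. A hypothetical module-side caller (report_evtchn_binding is not
part of the patch; it assumes unbound slots read back as -1):

static void report_evtchn_binding(unsigned port)
{
        int irq = irq_from_evtchn(port);

        if (irq == -1)
                printk(KERN_INFO "evtchn %u not bound to an irq\n", port);
        else
                printk(KERN_INFO "evtchn %u -> irq %d\n", port, irq);
}
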
static enum ipi_vector ipi_from_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
int irq;
struct irq_desc *desc;
- /* Only allocate from dynirq range */
for (irq = 0; irq < nr_irqs; irq++)
- if (irq_bindcount[irq] == 0)
+ if (irq_info[irq].type == IRQT_UNBOUND)
break;
if (irq == nr_irqs)
panic("No available IRQ to bind to: increase nr_irqs!\n");
- desc = irq_to_desc_alloc_cpu(irq, 0);
+ desc = irq_to_desc_alloc_node(irq, 0);
if (WARN_ON(desc == NULL))
return -1;
irq_info[irq] = mk_evtchn_info(evtchn);
}
- irq_bindcount[irq]++;
-
spin_unlock(&irq_mapping_update_lock);
return irq;
spin_lock(&irq_mapping_update_lock);
irq = per_cpu(ipi_to_irq, cpu)[ipi];
+
if (irq == -1) {
irq = find_unbound_irq();
 if (irq < 0)
 goto out;
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_ipi_info(evtchn, ipi);
-
per_cpu(ipi_to_irq, cpu)[ipi] = irq;
bind_evtchn_to_cpu(evtchn, cpu);
}
- irq_bindcount[irq]++;
-
out:
spin_unlock(&irq_mapping_update_lock);
return irq;
bind_evtchn_to_cpu(evtchn, cpu);
}
- irq_bindcount[irq]++;
-
spin_unlock(&irq_mapping_update_lock);
return irq;
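
Note: with irq_bindcount[] gone, binding is idempotent through the mapping
tables alone: a second bind of the same evtchn finds the existing irq and
returns it. Roughly how bind_evtchn_to_irq() reads after this patch (a sketch
assembled from the hunks above; the irq_chip/handler setup is elided):

int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        irq = evtchn_to_irq[evtchn];
        if (irq == -1) {
                /* First binder allocates the irq and records the
                 * mapping; later binders find and reuse it. */
                irq = find_unbound_irq();
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_evtchn_info(evtchn);
        }

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}
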
spin_lock(&irq_mapping_update_lock);
- if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
+ if (VALID_EVTCHN(evtchn)) {
close.port = evtchn;
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
BUG();
return IRQ_HANDLED;
}
-
-static void xen_do_irq(unsigned irq, struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- if (WARN_ON(irq == -1))
- return;
-
- exit_idle();
- irq_enter();
-
- //printk("cpu %d handling irq %d\n", smp_processor_id(), info->irq);
- handle_irq(irq, regs);
-
- irq_exit();
-
- set_irq_regs(old_regs);
-}
-
/*
 * Search the CPUs pending events bitmasks. For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
int cpu = get_cpu();
+ struct pt_regs *old_regs = set_irq_regs(regs);
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
static DEFINE_PER_CPU(unsigned, nesting_count);
unsigned count;
+ exit_idle();
+ irq_enter();
+
do {
unsigned long pending_words;
int port = (word_idx * BITS_PER_LONG) + bit_idx;
int irq = evtchn_to_irq[port];
- xen_do_irq(irq, regs);
+ if (irq != -1)
+ handle_irq(irq, regs);
}
}
} while(count != 1);
out:
+ irq_exit();
+ set_irq_regs(old_regs);
+
put_cpu();
}
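
Note: folding xen_do_irq() into the upcall moves exit_idle()/irq_enter()/
irq_exit() out of the per-event path: interrupt context is entered once per
upcall and every pending event is handled inside it, rather than paying the
enter/exit cost per event. The resulting control flow, reduced to a sketch:

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        exit_idle();
        irq_enter();                    /* once per upcall */

        /* scan the pending-event bitmasks; for each set bit:
         *         int irq = evtchn_to_irq[port];
         *         if (irq != -1)
         *                 handle_irq(irq, regs);
         */

        irq_exit();                     /* once, after all events drain */
        set_irq_regs(old_regs);
}
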
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
+ struct irq_info *info = info_for_irq(irq);
+
/* Make sure the irq is masked, since the new event channel
will also be masked. */
disable_irq(irq);
/* After resume the irq<->evtchn mappings are all cleared out */
BUG_ON(evtchn_to_irq[evtchn] != -1);
/* Expect irq to have been bound before,
- so the bindcount should be non-0 */
- BUG_ON(irq_bindcount[irq] == 0);
+ so there should be a proper type */
+ BUG_ON(info->type == IRQT_UNBOUND);
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_evtchn_info(evtchn);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
-static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
if (!VALID_EVTCHN(evtchn))
- return;
+ return -1;
/* Send future instances of this interrupt to other vcpu. */
bind_vcpu.port = evtchn;
*/
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
bind_evtchn_to_cpu(evtchn, tcpu);
-}
+ return 0;
+}
 
-static void set_affinity_irq(unsigned irq, const struct cpumask *dest)
+static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
unsigned tcpu = cpumask_first(dest);
- rebind_irq_to_cpu(irq, tcpu);
+
+ return rebind_irq_to_cpu(irq, tcpu);
}
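
Note: the int return matches the tree-wide change of the irq_chip
->set_affinity() hook to return a status, so a failed rebind (an invalid
event channel here) is reported to the generic irq layer instead of being
silently dropped. Sketch of the wiring (xen_dynamic_chip exists in this
file, but its full initializer is not shown in these hunks):

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name           = "xen-dyn",
        /* mask/unmask/ack hooks elided */
        .set_affinity   = set_affinity_irq,     /* now returns int */
};
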
int resend_irq_on_evtchn(unsigned int irq)
void __init xen_init_IRQ(void)
{
int i;
- size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);
- cpu_evtchn_mask_p = alloc_bootmem(size);
+ cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
+ GFP_KERNEL);
BUG_ON(cpu_evtchn_mask_p == NULL);
init_evtchn_cpu_bindings();
for (i = 0; i < NR_EVENT_CHANNELS; i++)
mask_evtchn(i);
- /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
- for (i = 0; i < nr_irqs; i++)
- irq_bindcount[i] = 0;
-
irq_ctx_init(smp_processor_id());
}
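
Note: kcalloc() replaces alloc_bootmem() because xen_init_IRQ() runs late
enough for the slab allocator to be up, and kcalloc gives zeroed,
overflow-checked array allocation in one call; with IRQT_UNBOUND == 0, the
zeroed memory already encodes "nothing bound". The pattern in isolation
(illustrative helper, not from the patch):

#include <linux/slab.h>

static struct cpu_evtchn_s *alloc_cpu_evtchn_masks(unsigned int ncpus)
{
        /* n * size is checked for overflow and the result is zeroed. */
        return kcalloc(ncpus, sizeof(struct cpu_evtchn_s), GFP_KERNEL);
}
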