#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
-#include "intel-iommu.h"
+#include <asm/smp.h>
+#include <asm/cpu.h>
+#include <linux/intel-iommu.h>
#include "intr_remapping.h"
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
u8 irte_mask;
};
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-static struct irq_2_iommu *irq_2_iommuX;
-/* fill one page ? */
-static int nr_irq_2_iommu = 0x100;
-static int irq_2_iommu_index;
-DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irq_2_iommu, PAGE_SIZE, NULL);
-
-extern void *__alloc_bootmem_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal);
-
-static struct irq_2_iommu *get_one_free_irq_2_iommu(int not_used)
+#ifdef CONFIG_SPARSE_IRQ
+/*
+ * Allocate one zeroed irq_2_iommu tracking structure, placed on the
+ * NUMA node that @cpu maps to (cpu_to_node) so the per-IRQ remapping
+ * data stays local to the CPU expected to service the interrupt.
+ * Uses GFP_ATOMIC, so it is safe to call from atomic context.
+ *
+ * Unlike the old DYN_ARRAY scheme this no longer panics on failure:
+ * may return NULL, and callers must check the result.
+ */
+static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
{
struct irq_2_iommu *iommu;
- unsigned long total_bytes;
-
- if (irq_2_iommu_index >= nr_irq_2_iommu) {
- /*
- * we run out of pre-allocate ones, allocate more
- */
- printk(KERN_DEBUG "try to get more irq_2_iommu %d\n", nr_irq_2_iommu);
+ int node;
- if (after_bootmem)
- iommu = kzalloc(total_bytes, GFP_ATOMIC);
- else
- iommu = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
+ node = cpu_to_node(cpu);
- if (!iommu)
- panic("can not get more irq_2_iommu\n");
+ /* NOTE(review): debug message is printed even when the allocation
+ * above returned NULL -- it reports the attempt, not success. */
+ iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
+ printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);
- irq_2_iommuX = iommu;
- irq_2_iommu_index = 0;
- }
-
- iommu = &irq_2_iommuX[irq_2_iommu_index];
- irq_2_iommu_index++;
return iommu;
}
desc = irq_to_desc(irq);
- BUG_ON(!desc);
+ if (WARN_ON_ONCE(!desc))
+ return NULL;
return desc->irq_2_iommu;
}
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+/*
+ * Get (or create) the irq_2_iommu mapping for @irq, allocating both the
+ * irq_desc and the irq_2_iommu structure on the node of @cpu if they do
+ * not exist yet.  Returns NULL when the irq_desc cannot be obtained; if
+ * the irq_2_iommu allocation itself fails, desc->irq_2_iommu stays NULL
+ * and that NULL is what gets returned -- callers must check.
+ */
+static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
{
struct irq_desc *desc;
struct irq_2_iommu *irq_iommu;
- desc = irq_to_desc(irq);
-
- BUG_ON(!desc);
+ /*
+ * alloc irq desc if not allocated already.
+ */
+ desc = irq_to_desc_alloc_cpu(irq, cpu);
+ if (!desc) {
+ printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+ return NULL;
+ }
irq_iommu = desc->irq_2_iommu;
if (!irq_iommu)
- desc->irq_2_iommu = get_one_free_irq_2_iommu(irq);
+ desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);
return desc->irq_2_iommu;
}
-#else /* !CONFIG_HAVE_SPARSE_IRQ */
+/*
+ * Convenience wrapper: allocate the mapping for @irq with the memory
+ * homed on the boot CPU's node (callers with no placement preference).
+ */
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+{
+ return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
+}
+
+#else /* !CONFIG_SPARSE_IRQ */
+
+#else /* !CONFIG_SPARSE_IRQ */
-#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct irq_2_iommu *irq_2_iommuX;
-DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
-#else
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
-#endif
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
if (!count)
return -1;
-#ifndef CONFIG_HAVE_SPARSE_IRQ
+#ifndef CONFIG_SPARSE_IRQ
/* protect irq_2_iommu_alloc later */
if (irq >= nr_irqs)
return -1;
table->base[i].present = 1;
irq_iommu = irq_2_iommu_alloc(irq);
+ if (!irq_iommu) {
+ spin_unlock(&irq_2_ir_lock);
+ printk(KERN_ERR "can't allocate irq_2_iommu\n");
+ return -1;
+ }
+
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0;
struct irq_2_iommu *irq_iommu;
spin_lock(&irq_2_ir_lock);
- irq_iommu = valid_irq_2_iommu(irq);
+
+ irq_iommu = irq_2_iommu_alloc(irq);
+
if (!irq_iommu) {
spin_unlock(&irq_2_ir_lock);
+ printk(KERN_ERR "can't allocate irq_2_iommu\n");
return -1;
}