1 #include <linux/dmar.h>
2 #include <linux/spinlock.h>
3 #include <linux/jiffies.h>
5 #include <asm/io_apic.h>
6 #include "intel-iommu.h"
7 #include "intr_remapping.h"
/* IOAPICs found under interrupt-remapping-capable DRHDs, filled in by
 * ir_parse_ioapic_scope(); valid entries are 0 .. ir_ioapic_num-1. */
9 static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
/* Count of valid entries in ir_ioapic[]. */
10 static int ir_ioapic_num;
/* Set to 1 once interrupt remapping has been enabled on the DRHDs. */
11 int intr_remapping_enabled;
/*
 * Point this IOMMU at its interrupt-remapping table and enable remapping.
 * @mode selects the extended (x2APIC/EIM) vs legacy interrupt format via
 * IR_X2APIC_MODE().  Caller must have allocated iommu->ir_table first
 * (see setup_intr_remapping()).
 */
13 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
/* Hardware needs the table's physical address in the IRTA register. */
19 	addr = virt_to_phys((void *)iommu->ir_table->base);
21 	spin_lock_irqsave(&iommu->register_lock, flags);
/* IRTA = table base | format mode | size field (2^(S+1) entries). */
23 	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
24 		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
26 	/* Set interrupt-remapping table pointer */
27 	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
28 	writel(cmd, iommu->reg + DMAR_GCMD_REG);
/* Spin until hardware reports the table pointer latched (IRTPS). */
30 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
31 		      readl, (sts & DMA_GSTS_IRTPS), sts);
32 	spin_unlock_irqrestore(&iommu->register_lock, flags);
35 	 * global invalidation of interrupt entry cache before enabling
36 	 * interrupt-remapping.
/* Re-take the lock: the cache invalidation above runs unlocked. */
40 	spin_lock_irqsave(&iommu->register_lock, flags);
42 	/* Enable interrupt-remapping */
43 	cmd = iommu->gcmd | DMA_GCMD_IRE;
/* Record the enable bit in the software copy of GCMD as well. */
44 	iommu->gcmd |= DMA_GCMD_IRE;
45 	writel(cmd, iommu->reg + DMAR_GCMD_REG);
/* Wait for the hardware to acknowledge enablement (IRES status bit). */
47 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
48 		      readl, (sts & DMA_GSTS_IRES), sts);
50 	spin_unlock_irqrestore(&iommu->register_lock, flags);
/*
 * Allocate the interrupt-remapping table for @iommu and program the
 * hardware with it (via iommu_set_intr_remapping()).  Returns 0 on
 * success, non-zero on allocation failure.
 */
54 static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
56 	struct ir_table *ir_table;
/* The descriptor is stored on the iommu so later code can find it. */
59 	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
/* Table entries live in zeroed pages of fixed order. */
65 	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
68 		printk(KERN_ERR "failed to allocate pages of order %d\n",
69 		       INTR_REMAP_PAGE_ORDER);
/* NOTE(review): iommu->ir_table is freed here but apparently not reset
 * to NULL — looks like a dangling pointer on this error path; confirm
 * against the elided lines before relying on iommu->ir_table later. */
70 		kfree(iommu->ir_table);
74 	ir_table->base = page_address(pages);
76 	iommu_set_intr_remapping(iommu, mode);
/*
 * Enable interrupt remapping on every DRHD in the system.
 * @eim: non-zero to request extended interrupt mode (x2APIC).
 *
 * Three passes over the DRHD units: (1) verify every unit supports
 * interrupt remapping (and EIM if requested), (2) enable queued
 * invalidation everywhere, (3) program and enable remapping.
 */
80 int __init enable_intr_remapping(int eim)
82 	struct dmar_drhd_unit *drhd;
86 	 * check for the Interrupt-remapping support
88 	for_each_drhd_unit(drhd) {
89 		struct intel_iommu *iommu = drhd->iommu;
91 		if (!ecap_ir_support(iommu->ecap))
/* EIM was requested but this unit cannot do it: report and bail. */
94 		if (eim && !ecap_eim_support(iommu->ecap)) {
95 			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
96 			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
102 	 * Enable queued invalidation for all the DRHD's.
104 	for_each_drhd_unit(drhd) {
106 		struct intel_iommu *iommu = drhd->iommu;
107 		ret = dmar_enable_qi(iommu);
/* NOTE(review): the comma in this message reads as a typo
 * ("queued, invalidation"); string left untouched here. */
110 			printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
111 			       " invalidation, ecap %Lx, ret %d\n",
112 			       drhd->reg_base_addr, iommu->ecap, ret);
118 	 * Setup Interrupt-remapping for all the DRHD's now.
120 	for_each_drhd_unit(drhd) {
121 		struct intel_iommu *iommu = drhd->iommu;
123 		if (!ecap_ir_support(iommu->ecap))
126 		if (setup_intr_remapping(iommu, eim))
/* All units configured: record global success. */
135 	intr_remapping_enabled = 1;
141 	 * handle error condition gracefully here!
/*
 * Walk the device-scope entries of one ACPI DMAR hardware-unit structure
 * and record every IOAPIC found under it in ir_ioapic[], associating it
 * with @iommu.  Returns non-zero if the ir_ioapic[] table overflows.
 */
146 static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
147 				 struct intel_iommu *iommu)
149 	struct acpi_dmar_hardware_unit *drhd;
150 	struct acpi_dmar_device_scope *scope;
153 	drhd = (struct acpi_dmar_hardware_unit *)header;
/* Scope entries are packed immediately after the DRHD structure. */
155 	start = (void *)(drhd + 1);
156 	end = ((void *)drhd) + header->length;
158 	while (start < end) {
160 		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
/* Fixed-size table: refuse to record more than MAX_IO_APICS. */
161 			if (ir_ioapic_num == MAX_IO_APICS) {
162 				printk(KERN_WARNING "Exceeded Max IO APICS\n");
166 			printk(KERN_INFO "IOAPIC id %d under DRHD base"
167 			       " 0x%Lx\n", scope->enumeration_id,
/* Remember which IOMMU remaps this IOAPIC's interrupts. */
170 			ir_ioapic[ir_ioapic_num].iommu = iommu;
171 			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
/* Entries are variable-length; advance by each entry's own length. */
174 		start += scope->length;
181 * Finds the association between IOAPIC's and its Interrupt-remapping
/*
 * For every DRHD that supports interrupt remapping, parse its device
 * scope to bind IOAPICs to their remapping IOMMU.  Warns if some system
 * IOAPICs are not covered by any remapping unit.
 */
184 int __init parse_ioapics_under_ir(void)
186 	struct dmar_drhd_unit *drhd;
187 	int ir_supported = 0;
189 	for_each_drhd_unit(drhd) {
190 		struct intel_iommu *iommu = drhd->iommu;
/* Only remapping-capable units contribute IOAPIC associations. */
192 		if (ecap_ir_support(iommu->ecap)) {
193 			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
/* Every IOAPIC must sit under some remapping unit, else warn. */
200 	if (ir_supported && ir_ioapic_num != nr_ioapics) {
202 		       "Not all IO-APIC's listed under remapping hardware\n");