#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
};

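/*
 * Per-IRQ bookkeeping: which IOMMU owns the IRTE for this irq, the base
 * index of that entry, and the sub-handle/mask used when several
 * interrupts (e.g. multi-vector MSI) share one block of entries.
 */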
#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
	struct irq_2_iommu *iommu;

	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
	printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);

	return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (WARN_ON_ONCE(!desc))
		return NULL;

	return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	struct irq_2_iommu *irq_iommu;

	/*
	 * alloc irq desc if not allocated already.
	 */
	desc = irq_to_desc_alloc_node(irq, node);
	if (!desc) {
		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
		return NULL;
	}

	irq_iommu = desc->irq_2_iommu;
	if (!irq_iommu)
		desc->irq_2_iommu = get_one_free_irq_2_iommu(node);

	return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}
#endif

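/*
 * irq_2_ir_lock serializes every reader and writer of the irq -> IRTE
 * mappings above, as well as the 'present' bits in the remapping table.
 */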
static DEFINE_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu || !irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}

int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}

int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

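/*
 * Allocate 'count' contiguous IRTEs for an irq. Counts above one are
 * rounded up to a power of two and remembered as irte_mask, so the whole
 * block can be flushed or freed in one operation later. Free space is
 * found by a linear scan that wraps at INTR_REMAP_TABLE_ENTRIES.
 */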
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

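/*
 * Submit an Interrupt Entry Cache invalidation descriptor for 'index'
 * (widened by 'mask') and wait synchronously for completion.
 */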
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

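/*
 * Return the base IRTE index for an already-mapped irq and report its
 * sub-handle, so related interrupts (typically the vectors of one MSI
 * block) can be placed into the same allocation.
 */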
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

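/*
 * Update the low half of an irq's IRTE in place. set_64bit() keeps the
 * update atomic with respect to a concurrent hardware table walk, and
 * the entry cache is flushed afterwards so the new contents take effect.
 */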
int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit((unsigned long *)irte, irte_modified->low);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

int flush_irte(int irq)
{
	int rc;
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

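/*
 * Map an interrupt source to the IOMMU that remaps it: IO-APICs via the
 * ir_ioapic[] table built from the DMAR device scopes, PCI devices via
 * their matched DRHD unit.
 */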
struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;

	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

int free_irte(int irq)
{
	int rc = 0;
	int index, i;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	if (!irq_iommu->sub_handle) {
		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
			set_64bit((unsigned long *)(irte + i), 0);
		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

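/*
 * Program one IOMMU for remapping: point DMAR_IRTA_REG at the table
 * (carrying the x2apic/EIM mode bit), latch it with SIRTP, enable
 * compatibility-format pass-through in xapic mode, invalidate the entry
 * cache, and finally set IRE. Every command is confirmed by polling the
 * global status register.
 */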
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 cmd, sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	if (mode == 0) {
		spin_lock_irqsave(&iommu->register_lock, flags);

		/* enable compatibility format interrupt pass through */
		cmd = iommu->gcmd | DMA_GCMD_CFI;
		iommu->gcmd |= DMA_GCMD_CFI;
		writel(cmd, iommu->reg + DMAR_GCMD_REG);

		IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
			      readl, (sts & DMA_GSTS_CFIS), sts);

		spin_unlock_irqrestore(&iommu->register_lock, flags);
	}

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

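/*
 * Allocate the ir_table descriptor plus INTR_REMAP_PAGE_ORDER zeroed
 * pages of IRTEs, then hand the table to the hardware.
 */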
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);
	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);

	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

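/*
 * Enable remapping system-wide in four passes over the DRHDs: tear down
 * any state left over from before OS handover, check IR (and, if
 * requested, EIM) support, turn on queued invalidation, and set up a
 * remapping table per IOMMU.
 */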
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);
		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_enable_qi(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued"
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

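/*
 * Record the IO-APICs covered by one DRHD by walking the ACPI
 * device-scope entries that follow the hardware-unit header.
 */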
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_ioapic[ir_ioapic_num].iommu = iommu;
			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
			ir_ioapic_num++;
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IOAPIC's and its Interrupt-remapping
 * hardware unit.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APIC's listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}

void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}

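/*
 * Restart queued invalidation where it had been initialized, then
 * reprogram every capable IOMMU from its existing ir_table; returns -1
 * if none could be set up.
 */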
int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}