/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "

/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
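
/*
 * Note: dmar_tbl points at the ACPI DMAR table mapped during early
 * detection; detect_intel_iommu() below unmaps it again and clears the
 * pointer once detection is done.
 */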

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scanning the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
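
/*
 * Device scopes are parsed in two passes: first count the ENDPOINT and
 * BRIDGE entries so that the pci_dev array can be sized, then walk the
 * scope list again and resolve each entry to a pci_dev.
 */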
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	if (!drhd->address) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -ENODEV;
	}

	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}
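
/*
 * RMRR (Reserved Memory Region Reporting) structures describe memory
 * ranges that the BIOS expects to stay identity-mapped for specific
 * devices (e.g. for USB legacy emulation).
 */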
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
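
/*
 * ATSR (ATS Reporting) structures list the PCIe root ports below which
 * devices may use Address Translation Services.
 */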
static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}
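
/*
 * Walk up from the device to its PCIe root port and check whether that
 * port appears in the device scope of an ATSR unit for the device's
 * segment.
 */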
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
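
/*
 * RHSA (Remapping Hardware Static Affinity) structures associate a DRHD
 * unit with a NUMA proximity domain, allowing per-IOMMU data to be
 * allocated node-locally.
 */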
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd)
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			break;
		}

	return 0;
}

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX
		       "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
			ret = dmar_parse_one_rhsa(entry_header);
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
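
/*
 * A device matches a device-scope array if the device itself, or any
 * bridge above it, appears in the array.
 */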
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	struct dmar_rmrr_unit *rmrr, *rmrr_n;
	struct dmar_atsr_unit *atsr, *atsr_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
		ret = rmrr_parse_dev(rmrr);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
		ret = atsr_parse_dev(atsr);
		if (ret)
			return ret;
	}

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");

	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

#ifdef CONFIG_INTR_REMAP
	{
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
	}
#endif

	if (ret && !no_iommu && !iommu_detected && !swiotlb &&
	    !dmar_disabled)
		iommu_detected = 1;

	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}
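
/*
 * Map the register set of one remapping hardware unit and read its
 * capability registers.  The initial VTD_PAGE_SIZE mapping is grown
 * below when the capabilities report registers beyond the first page.
 */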
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     drhd->reg_base_addr,
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		goto err_unmap;
	}

	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
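
/*
 * Check the fault status register for queued-invalidation errors:
 * IQE (invalid descriptor), ITE (invalidation time-out) and ICE
 * (invalidation completion error), and recover where possible.
 */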
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
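
/*
 * Note: each synchronous submission consumes two ring slots - the
 * descriptor itself plus a wait descriptor whose status write signals
 * completion - which is why qi_submit_sync() waits for at least three
 * free slots before queueing.
 */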

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context to queue another cmd while a cmd is already submitted
		 * and waiting for completion on this cpu. This is to avoid
		 * a deadlock where the interrupt context can wait indefinitely
		 * for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
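
/*
 * Device-IOTLB invalidation: for a non-zero mask, the low-order address
 * bits together with the size bit encode the 2^mask-page range being
 * invalidated, per the VT-d descriptor format.
 */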
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}
	memset(qi->desc_status, 0, QI_LENGTH * sizeof(int));

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
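
/*
 * Primary fault handler: walks the hardware fault recording registers
 * (16 bytes each; the F bit in the topmost dword marks a valid record),
 * logs each fault and clears it.
 */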
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in the DMAR table description.
 */
int dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}