dmar: Fix build failure without NUMA, warn on bogus RHSA tables and don't abort
drivers/pci/dmar.c
1 /*
2  * Copyright (c) 2006, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Copyright (C) 2006-2008 Intel Corporation
18  * Author: Ashok Raj <ashok.raj@intel.com>
19  * Author: Shaohua Li <shaohua.li@intel.com>
20  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21  *
22  * This file implements early detection/parsing of Remapping Devices
23  * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24  * tables.
25  *
26  * These routines are used by both DMA-remapping and Interrupt-remapping
27  */
28
29 #include <linux/pci.h>
30 #include <linux/dmar.h>
31 #include <linux/iova.h>
32 #include <linux/intel-iommu.h>
33 #include <linux/timer.h>
34 #include <linux/irq.h>
35 #include <linux/interrupt.h>
36 #include <linux/tboot.h>
37 #include <linux/dmi.h>
38
39 #define PREFIX "DMAR: "
40
41 /* No locks are needed as DMA remapping hardware unit
42  * list is constructed at boot time and hotplug of
43  * these units is not supported by the architecture.
44  */
45 LIST_HEAD(dmar_drhd_units);
46
47 static struct acpi_table_header * __initdata dmar_tbl;
48 static acpi_size dmar_tbl_size;
49
50 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
51 {
52         /*
53          * add INCLUDE_ALL at the tail, so that a scan of the list will
54          * find it at the very end.
55          */
56         if (drhd->include_all)
57                 list_add_tail(&drhd->list, &dmar_drhd_units);
58         else
59                 list_add(&drhd->list, &dmar_drhd_units);
60 }
61
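/*
 * Resolve one ACPI device scope entry to a pci_dev: start at the bus
 * number given in the scope, then follow each (device, function) step
 * of the embedded PCI path, descending through subordinate buses.
 * Devices that the BIOS lists but that do not exist are warned about
 * and skipped; a scope whose type (endpoint vs. bridge) does not match
 * the device found is rejected with -EINVAL.
 */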
62 static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
63                                            struct pci_dev **dev, u16 segment)
64 {
65         struct pci_bus *bus;
66         struct pci_dev *pdev = NULL;
67         struct acpi_dmar_pci_path *path;
68         int count;
69
70         bus = pci_find_bus(segment, scope->bus);
71         path = (struct acpi_dmar_pci_path *)(scope + 1);
72         count = (scope->length - sizeof(struct acpi_dmar_device_scope))
73                 / sizeof(struct acpi_dmar_pci_path);
74
75         while (count) {
76                 if (pdev)
77                         pci_dev_put(pdev);
78                 /*
79                  * Some BIOSes list non-existent devices in the DMAR table;
80                  * just ignore them.
81                  */
82                 if (!bus) {
83                         printk(KERN_WARNING
84                         PREFIX "Device scope bus [%d] not found\n",
85                         scope->bus);
86                         break;
87                 }
88                 pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
89                 if (!pdev) {
90                         printk(KERN_WARNING PREFIX
91                         "Device scope device [%04x:%02x:%02x.%02x] not found\n",
92                                 segment, bus->number, path->dev, path->fn);
93                         break;
94                 }
95                 path++;
96                 count--;
97                 bus = pdev->subordinate;
98         }
99         if (!pdev) {
100                 printk(KERN_WARNING PREFIX
101                 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
102                 segment, scope->bus, path->dev, path->fn);
103                 *dev = NULL;
104                 return 0;
105         }
106         if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
107                         pdev->subordinate) || (scope->entry_type ==
108                         ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
109                 printk(KERN_WARNING PREFIX
110                         "Device scope type does not match for %s\n",
111                          pci_name(pdev));
112                 pci_dev_put(pdev);
113                 return -EINVAL;
114         }
115         *dev = pdev;
116         return 0;
117 }
118
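/*
 * Parse all device scope entries in [start, end): first count the
 * endpoint and bridge entries, then allocate the pci_dev pointer array
 * and resolve each entry with dmar_parse_one_dev_scope().
 */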
119 static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
120                                        struct pci_dev ***devices, u16 segment)
121 {
122         struct acpi_dmar_device_scope *scope;
123         void * tmp = start;
124         int index;
125         int ret;
126
127         *cnt = 0;
128         while (start < end) {
129                 scope = start;
130                 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
131                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
132                         (*cnt)++;
133                 else
134                         printk(KERN_WARNING PREFIX
135                                 "Unsupported device scope\n");
136                 start += scope->length;
137         }
138         if (*cnt == 0)
139                 return 0;
140
141         *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
142         if (!*devices)
143                 return -ENOMEM;
144
145         start = tmp;
146         index = 0;
147         while (start < end) {
148                 scope = start;
149                 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
150                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
151                         ret = dmar_parse_one_dev_scope(scope,
152                                 &(*devices)[index], segment);
153                         if (ret) {
154                                 kfree(*devices);
155                                 return ret;
156                         }
157                         index ++;
158                 }
159                 start += scope->length;
160         }
161
162         return 0;
163 }
164
165 /**
166  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
167  * structure, which uniquely represents one DMA remapping hardware unit
168  * present in the platform
169  */
170 static int __init
171 dmar_parse_one_drhd(struct acpi_dmar_header *header)
172 {
173         struct acpi_dmar_hardware_unit *drhd;
174         struct dmar_drhd_unit *dmaru;
175         int ret = 0;
176
177         drhd = (struct acpi_dmar_hardware_unit *)header;
178         if (!drhd->address) {
179                 /* Promote an attitude of violence to a BIOS engineer today */
180                 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
181                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
182                      dmi_get_system_info(DMI_BIOS_VENDOR),
183                      dmi_get_system_info(DMI_BIOS_VERSION),
184                      dmi_get_system_info(DMI_PRODUCT_VERSION));
185                 return -ENODEV;
186         }
187         dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
188         if (!dmaru)
189                 return -ENOMEM;
190
191         dmaru->hdr = header;
192         dmaru->reg_base_addr = drhd->address;
193         dmaru->segment = drhd->segment;
194         dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
195
196         ret = alloc_iommu(dmaru);
197         if (ret) {
198                 kfree(dmaru);
199                 return ret;
200         }
201         dmar_register_drhd_unit(dmaru);
202         return 0;
203 }
204
205 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
206 {
207         struct acpi_dmar_hardware_unit *drhd;
208         int ret = 0;
209
210         drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
211
212         if (dmaru->include_all)
213                 return 0;
214
215         ret = dmar_parse_dev_scope((void *)(drhd + 1),
216                                 ((void *)drhd) + drhd->header.length,
217                                 &dmaru->devices_cnt, &dmaru->devices,
218                                 drhd->segment);
219         if (ret) {
220                 list_del(&dmaru->list);
221                 kfree(dmaru);
222         }
223         return ret;
224 }
225
226 #ifdef CONFIG_DMAR
227 LIST_HEAD(dmar_rmrr_units);
228
229 static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
230 {
231         list_add(&rmrr->list, &dmar_rmrr_units);
232 }
233
234
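/*
 * Record one Reserved Memory Region Reporting (RMRR) structure: allocate
 * a dmar_rmrr_unit, copy the base and end addresses, and add it to the
 * dmar_rmrr_units list.  Its device scope is resolved later by
 * rmrr_parse_dev().
 */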
235 static int __init
236 dmar_parse_one_rmrr(struct acpi_dmar_header *header)
237 {
238         struct acpi_dmar_reserved_memory *rmrr;
239         struct dmar_rmrr_unit *rmrru;
240
241         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
242         if (!rmrru)
243                 return -ENOMEM;
244
245         rmrru->hdr = header;
246         rmrr = (struct acpi_dmar_reserved_memory *)header;
247         rmrru->base_address = rmrr->base_address;
248         rmrru->end_address = rmrr->end_address;
249
250         dmar_register_rmrr_unit(rmrru);
251         return 0;
252 }
253
254 static int __init
255 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
256 {
257         struct acpi_dmar_reserved_memory *rmrr;
258         int ret;
259
260         rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
261         ret = dmar_parse_dev_scope((void *)(rmrr + 1),
262                 ((void *)rmrr) + rmrr->header.length,
263                 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
264
265         if (ret || (rmrru->devices_cnt == 0)) {
266                 list_del(&rmrru->list);
267                 kfree(rmrru);
268         }
269         return ret;
270 }
271
272 static LIST_HEAD(dmar_atsr_units);
273
274 static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
275 {
276         struct acpi_dmar_atsr *atsr;
277         struct dmar_atsr_unit *atsru;
278
279         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
280         atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
281         if (!atsru)
282                 return -ENOMEM;
283
284         atsru->hdr = hdr;
285         atsru->include_all = atsr->flags & 0x1;
286
287         list_add(&atsru->list, &dmar_atsr_units);
288
289         return 0;
290 }
291
292 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
293 {
294         int rc;
295         struct acpi_dmar_atsr *atsr;
296
297         if (atsru->include_all)
298                 return 0;
299
300         atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
301         rc = dmar_parse_dev_scope((void *)(atsr + 1),
302                                 (void *)atsr + atsr->header.length,
303                                 &atsru->devices_cnt, &atsru->devices,
304                                 atsr->segment);
305         if (rc || !atsru->devices_cnt) {
306                 list_del(&atsru->list);
307                 kfree(atsru);
308         }
309
310         return rc;
311 }
312
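/*
 * Return 1 if @dev is covered by an ATS Reporting (ATSR) structure on
 * its PCI segment, i.e. the root port above it is listed in the ATSR
 * device scope, or the matching ATSR has its include-all flag (bit 0)
 * set.
 */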
313 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
314 {
315         int i;
316         struct pci_bus *bus;
317         struct acpi_dmar_atsr *atsr;
318         struct dmar_atsr_unit *atsru;
319
320         list_for_each_entry(atsru, &dmar_atsr_units, list) {
321                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
322                 if (atsr->segment == pci_domain_nr(dev->bus))
323                         goto found;
324         }
325
326         return 0;
327
328 found:
329         for (bus = dev->bus; bus; bus = bus->parent) {
330                 struct pci_dev *bridge = bus->self;
331
332                 if (!bridge || !bridge->is_pcie ||
333                     bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
334                         return 0;
335
336                 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
337                         for (i = 0; i < atsru->devices_cnt; i++)
338                                 if (atsru->devices[i] == bridge)
339                                         return 1;
340                         break;
341                 }
342         }
343
344         if (atsru->include_all)
345                 return 1;
346
347         return 0;
348 }
349 #endif
350
351 #ifdef CONFIG_ACPI_NUMA
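/*
 * Handle one Remapping Hardware Static Affinity (RHSA) structure: map
 * its proximity domain to a NUMA node and record it in the matching
 * DRHD's iommu->node.  An RHSA that refers to a non-existent DRHD only
 * triggers a warning; it is not treated as fatal.
 */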
352 static int __init
353 dmar_parse_one_rhsa(struct acpi_dmar_header *header)
354 {
355         struct acpi_dmar_rhsa *rhsa;
356         struct dmar_drhd_unit *drhd;
357
358         rhsa = (struct acpi_dmar_rhsa *)header;
359         for_each_drhd_unit(drhd) {
360                 if (drhd->reg_base_addr == rhsa->base_address) {
361                         int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
362
363                         if (!node_online(node))
364                                 node = -1;
365                         drhd->iommu->node = node;
366                         return 0;
367                 }
368         }
369         WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
370              "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
371              rhsa->base_address,
372              dmi_get_system_info(DMI_BIOS_VENDOR),
373              dmi_get_system_info(DMI_BIOS_VERSION),
374              dmi_get_system_info(DMI_PRODUCT_VERSION));
375
376         return 0;
377 }
378 #endif
379
380 static void __init
381 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
382 {
383         struct acpi_dmar_hardware_unit *drhd;
384         struct acpi_dmar_reserved_memory *rmrr;
385         struct acpi_dmar_atsr *atsr;
386         struct acpi_dmar_rhsa *rhsa;
387
388         switch (header->type) {
389         case ACPI_DMAR_TYPE_HARDWARE_UNIT:
390                 drhd = container_of(header, struct acpi_dmar_hardware_unit,
391                                     header);
392                 printk (KERN_INFO PREFIX
393                         "DRHD base: %#016Lx flags: %#x\n",
394                         (unsigned long long)drhd->address, drhd->flags);
395                 break;
396         case ACPI_DMAR_TYPE_RESERVED_MEMORY:
397                 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
398                                     header);
399                 printk (KERN_INFO PREFIX
400                         "RMRR base: %#016Lx end: %#016Lx\n",
401                         (unsigned long long)rmrr->base_address,
402                         (unsigned long long)rmrr->end_address);
403                 break;
404         case ACPI_DMAR_TYPE_ATSR:
405                 atsr = container_of(header, struct acpi_dmar_atsr, header);
406                 printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
407                 break;
408         case ACPI_DMAR_HARDWARE_AFFINITY:
409                 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
410                 printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
411                        (unsigned long long)rhsa->base_address,
412                        rhsa->proximity_domain);
413                 break;
414         }
415 }
416
417 /**
418  * dmar_table_detect - checks to see if the platform supports DMAR devices
419  */
420 static int __init dmar_table_detect(void)
421 {
422         acpi_status status = AE_OK;
423
424         /* if we can find the DMAR table, then there are DMAR devices */
425         status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
426                                 (struct acpi_table_header **)&dmar_tbl,
427                                 &dmar_tbl_size);
428
429         if (ACPI_SUCCESS(status) && !dmar_tbl) {
430                 printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
431                 status = AE_NOT_FOUND;
432         }
433
434         return (ACPI_SUCCESS(status) ? 1 : 0);
435 }
436
437 /**
438  * parse_dmar_table - parses the DMA reporting table
439  */
440 static int __init
441 parse_dmar_table(void)
442 {
443         struct acpi_table_dmar *dmar;
444         struct acpi_dmar_header *entry_header;
445         int ret = 0;
446
447         /*
448          * Do it again; the earlier dmar_tbl mapping could have been done
449          * with the fixed map.
450          */
451         dmar_table_detect();
452
453         /*
454          * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
455          * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
456          */
457         dmar_tbl = tboot_get_dmar_table(dmar_tbl);
458
459         dmar = (struct acpi_table_dmar *)dmar_tbl;
460         if (!dmar)
461                 return -ENODEV;
462
463         if (dmar->width < PAGE_SHIFT - 1) {
464                 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
465                 return -EINVAL;
466         }
467
468         printk (KERN_INFO PREFIX "Host address width %d\n",
469                 dmar->width + 1);
470
471         entry_header = (struct acpi_dmar_header *)(dmar + 1);
472         while (((unsigned long)entry_header) <
473                         (((unsigned long)dmar) + dmar_tbl->length)) {
474                 /* Avoid looping forever on bad ACPI tables */
475                 if (entry_header->length == 0) {
476                         printk(KERN_WARNING PREFIX
477                                 "Invalid 0-length structure\n");
478                         ret = -EINVAL;
479                         break;
480                 }
481
482                 dmar_table_print_dmar_entry(entry_header);
483
484                 switch (entry_header->type) {
485                 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
486                         ret = dmar_parse_one_drhd(entry_header);
487                         break;
488                 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
489 #ifdef CONFIG_DMAR
490                         ret = dmar_parse_one_rmrr(entry_header);
491 #endif
492                         break;
493                 case ACPI_DMAR_TYPE_ATSR:
494 #ifdef CONFIG_DMAR
495                         ret = dmar_parse_one_atsr(entry_header);
496 #endif
497                         break;
498                 case ACPI_DMAR_HARDWARE_AFFINITY:
499 #ifdef CONFIG_ACPI_NUMA
500                         ret = dmar_parse_one_rhsa(entry_header);
501 #endif
502                         break;
503                 default:
504                         printk(KERN_WARNING PREFIX
505                                 "Unknown DMAR structure type %d\n",
506                                 entry_header->type);
507                         ret = 0; /* for forward compatibility */
508                         break;
509                 }
510                 if (ret)
511                         break;
512
513                 entry_header = ((void *)entry_header + entry_header->length);
514         }
515         return ret;
516 }
517
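/*
 * Return 1 if @dev, or any bridge above it, appears in the given device
 * scope array.
 */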
518 int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
519                           struct pci_dev *dev)
520 {
521         int index;
522
523         while (dev) {
524                 for (index = 0; index < cnt; index++)
525                         if (dev == devices[index])
526                                 return 1;
527
528                 /* Check our parent */
529                 dev = dev->bus->self;
530         }
531
532         return 0;
533 }
534
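/*
 * Find the DRHD unit covering @dev, either through an explicit device
 * scope entry or through an INCLUDE_ALL unit on the same PCI segment.
 */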
535 struct dmar_drhd_unit *
536 dmar_find_matched_drhd_unit(struct pci_dev *dev)
537 {
538         struct dmar_drhd_unit *dmaru = NULL;
539         struct acpi_dmar_hardware_unit *drhd;
540
541         list_for_each_entry(dmaru, &dmar_drhd_units, list) {
542                 drhd = container_of(dmaru->hdr,
543                                     struct acpi_dmar_hardware_unit,
544                                     header);
545
546                 if (dmaru->include_all &&
547                     drhd->segment == pci_domain_nr(dev->bus))
548                         return dmaru;
549
550                 if (dmar_pci_device_match(dmaru->devices,
551                                           dmaru->devices_cnt, dev))
552                         return dmaru;
553         }
554
555         return NULL;
556 }
557
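/*
 * Resolve the device scope lists of all DRHD units (and, when DMA
 * remapping is configured in, of all RMRR and ATSR units) into pci_dev
 * arrays.  This relies on PCI enumeration having completed, since the
 * scopes are looked up with pci_find_bus()/pci_get_slot().
 */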
558 int __init dmar_dev_scope_init(void)
559 {
560         struct dmar_drhd_unit *drhd, *drhd_n;
561         int ret = -ENODEV;
562
563         list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
564                 ret = dmar_parse_dev(drhd);
565                 if (ret)
566                         return ret;
567         }
568
569 #ifdef CONFIG_DMAR
570         {
571                 struct dmar_rmrr_unit *rmrr, *rmrr_n;
572                 struct dmar_atsr_unit *atsr, *atsr_n;
573
574                 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
575                         ret = rmrr_parse_dev(rmrr);
576                         if (ret)
577                                 return ret;
578                 }
579
580                 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
581                         ret = atsr_parse_dev(atsr);
582                         if (ret)
583                                 return ret;
584                 }
585         }
586 #endif
587
588         return ret;
589 }
590
591
592 int __init dmar_table_init(void)
593 {
594         static int dmar_table_initialized;
595         int ret;
596
597         if (dmar_table_initialized)
598                 return 0;
599
600         dmar_table_initialized = 1;
601
602         ret = parse_dmar_table();
603         if (ret) {
604                 if (ret != -ENODEV)
605                         printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
606                 return ret;
607         }
608
609         if (list_empty(&dmar_drhd_units)) {
610                 printk(KERN_INFO PREFIX "No DMAR devices found\n");
611                 return -ENODEV;
612         }
613
614 #ifdef CONFIG_DMAR
615         if (list_empty(&dmar_rmrr_units))
616                 printk(KERN_INFO PREFIX "No RMRR found\n");
617
618         if (list_empty(&dmar_atsr_units))
619                 printk(KERN_INFO PREFIX "No ATSR found\n");
620 #endif
621
622         return 0;
623 }
624
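/*
 * Early detection hook: map the DMAR table to decide whether VT-d
 * hardware is present, set iommu_detected when DMA remapping is
 * configured in and not disabled, then unmap the table again (it is
 * re-mapped later by parse_dmar_table()).
 */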
625 void __init detect_intel_iommu(void)
626 {
627         int ret;
628
629         ret = dmar_table_detect();
630
631         {
632 #ifdef CONFIG_INTR_REMAP
633                 struct acpi_table_dmar *dmar;
634                 /*
635                  * for now we will disable dma-remapping when interrupt
636                  * remapping is enabled.
637                  * When support for queued invalidation for IOTLB invalidation
638                  * is added, we will not need this any more.
639                  */
640                 dmar = (struct acpi_table_dmar *) dmar_tbl;
641                 if (ret && cpu_has_x2apic && dmar->flags & 0x1)
642                         printk(KERN_INFO
643                                "Queued invalidation will be enabled to support "
644                                "x2apic and Intr-remapping.\n");
645 #endif
646 #ifdef CONFIG_DMAR
647                 if (ret && !no_iommu && !iommu_detected && !swiotlb &&
648                     !dmar_disabled)
649                         iommu_detected = 1;
650 #endif
651         }
652         early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
653         dmar_tbl = NULL;
654 }
655
656
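/*
 * Allocate and initialize the struct intel_iommu for one DRHD: map the
 * register window, sanity-check the capability registers, work out the
 * supported address widths (agaw/msagaw), and remap the window if the
 * register set spans more than one page.
 */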
657 int alloc_iommu(struct dmar_drhd_unit *drhd)
658 {
659         struct intel_iommu *iommu;
660         int map_size;
661         u32 ver;
662         static int iommu_allocated = 0;
663         int agaw = 0;
664         int msagaw = 0;
665
666         iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
667         if (!iommu)
668                 return -ENOMEM;
669
670         iommu->seq_id = iommu_allocated++;
671         sprintf (iommu->name, "dmar%d", iommu->seq_id);
672
673         iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
674         if (!iommu->reg) {
675                 printk(KERN_ERR "IOMMU: can't map the region\n");
676                 goto error;
677         }
678         iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
679         iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
680
681         if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
682                 /* Promote an attitude of violence to a BIOS engineer today */
683                 WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
684                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
685                      drhd->reg_base_addr,
686                      dmi_get_system_info(DMI_BIOS_VENDOR),
687                      dmi_get_system_info(DMI_BIOS_VERSION),
688                      dmi_get_system_info(DMI_PRODUCT_VERSION));
689                 goto err_unmap;
690         }
691
692 #ifdef CONFIG_DMAR
693         agaw = iommu_calculate_agaw(iommu);
694         if (agaw < 0) {
695                 printk(KERN_ERR
696                        "Cannot get a valid agaw for iommu (seq_id = %d)\n",
697                        iommu->seq_id);
698                 goto err_unmap;
699         }
700         msagaw = iommu_calculate_max_sagaw(iommu);
701         if (msagaw < 0) {
702                 printk(KERN_ERR
703                         "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
704                         iommu->seq_id);
705                 goto err_unmap;
706         }
707 #endif
708         iommu->agaw = agaw;
709         iommu->msagaw = msagaw;
710
711         iommu->node = -1;
712
713         /* the registers might be more than one page */
714         map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
715                 cap_max_fault_reg_offset(iommu->cap));
716         map_size = VTD_PAGE_ALIGN(map_size);
717         if (map_size > VTD_PAGE_SIZE) {
718                 iounmap(iommu->reg);
719                 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
720                 if (!iommu->reg) {
721                         printk(KERN_ERR "IOMMU: can't map the region\n");
722                         goto error;
723                 }
724         }
725
726         ver = readl(iommu->reg + DMAR_VER_REG);
727         pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
728                 (unsigned long long)drhd->reg_base_addr,
729                 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
730                 (unsigned long long)iommu->cap,
731                 (unsigned long long)iommu->ecap);
732
733         spin_lock_init(&iommu->register_lock);
734
735         drhd->iommu = iommu;
736         return 0;
737
738  err_unmap:
739         iounmap(iommu->reg);
740  error:
741         kfree(iommu);
742         return -1;
743 }
744
745 void free_iommu(struct intel_iommu *iommu)
746 {
747         if (!iommu)
748                 return;
749
750 #ifdef CONFIG_DMAR
751         free_dmar_iommu(iommu);
752 #endif
753
754         if (iommu->reg)
755                 iounmap(iommu->reg);
756         kfree(iommu);
757 }
758
759 /*
760  * Reclaim all the submitted descriptors which have completed their work.
761  */
762 static inline void reclaim_free_desc(struct q_inval *qi)
763 {
764         while (qi->desc_status[qi->free_tail] == QI_DONE ||
765                qi->desc_status[qi->free_tail] == QI_ABORT) {
766                 qi->desc_status[qi->free_tail] = QI_FREE;
767                 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
768                 qi->free_cnt++;
769         }
770 }
771
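/*
 * Check the fault status register while waiting for a queued
 * invalidation wait descriptor to complete.  An Invalidation Queue
 * Error (IQE) on our descriptor is overwritten with a wait descriptor
 * and reported as -EINVAL; an Invalidation Time-out Error (ITE) marks
 * the pending wait descriptors QI_ABORT, returning -EAGAIN if ours was
 * among them so the caller can retry; an Invalidation Completion Error
 * (ICE) is simply cleared.
 */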
772 static int qi_check_fault(struct intel_iommu *iommu, int index)
773 {
774         u32 fault;
775         int head, tail;
776         struct q_inval *qi = iommu->qi;
777         int wait_index = (index + 1) % QI_LENGTH;
778
779         if (qi->desc_status[wait_index] == QI_ABORT)
780                 return -EAGAIN;
781
782         fault = readl(iommu->reg + DMAR_FSTS_REG);
783
784         /*
785          * If IQE happens, the head points to the descriptor associated
786          * with the error. No new descriptors are fetched until the IQE
787          * is cleared.
788          */
789         if (fault & DMA_FSTS_IQE) {
790                 head = readl(iommu->reg + DMAR_IQH_REG);
791                 if ((head >> DMAR_IQ_SHIFT) == index) {
792                         printk(KERN_ERR "VT-d detected invalid descriptor: "
793                                 "low=%llx, high=%llx\n",
794                                 (unsigned long long)qi->desc[index].low,
795                                 (unsigned long long)qi->desc[index].high);
796                         memcpy(&qi->desc[index], &qi->desc[wait_index],
797                                         sizeof(struct qi_desc));
798                         __iommu_flush_cache(iommu, &qi->desc[index],
799                                         sizeof(struct qi_desc));
800                         writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
801                         return -EINVAL;
802                 }
803         }
804
805         /*
806          * If ITE happens, all pending wait_desc commands are aborted.
807          * No new descriptors are fetched until the ITE is cleared.
808          */
809         if (fault & DMA_FSTS_ITE) {
810                 head = readl(iommu->reg + DMAR_IQH_REG);
811                 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
812                 head |= 1;
813                 tail = readl(iommu->reg + DMAR_IQT_REG);
814                 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
815
816                 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
817
818                 do {
819                         if (qi->desc_status[head] == QI_IN_USE)
820                                 qi->desc_status[head] = QI_ABORT;
821                         head = (head - 2 + QI_LENGTH) % QI_LENGTH;
822                 } while (head != tail);
823
824                 if (qi->desc_status[wait_index] == QI_ABORT)
825                         return -EAGAIN;
826         }
827
828         if (fault & DMA_FSTS_ICE)
829                 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
830
831         return 0;
832 }
833
834 /*
835  * Submit the queued invalidation descriptor to the remapping
836  * hardware unit and wait for its completion.
837  */
838 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
839 {
840         int rc;
841         struct q_inval *qi = iommu->qi;
842         struct qi_desc *hw, wait_desc;
843         int wait_index, index;
844         unsigned long flags;
845
846         if (!qi)
847                 return 0;
848
849         hw = qi->desc;
850
851 restart:
852         rc = 0;
853
854         spin_lock_irqsave(&qi->q_lock, flags);
855         while (qi->free_cnt < 3) {
856                 spin_unlock_irqrestore(&qi->q_lock, flags);
857                 cpu_relax();
858                 spin_lock_irqsave(&qi->q_lock, flags);
859         }
860
861         index = qi->free_head;
862         wait_index = (index + 1) % QI_LENGTH;
863
864         qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
865
866         hw[index] = *desc;
867
868         wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
869                         QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
870         wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
871
872         hw[wait_index] = wait_desc;
873
874         __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
875         __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
876
877         qi->free_head = (qi->free_head + 2) % QI_LENGTH;
878         qi->free_cnt -= 2;
879
880         /*
881          * update the HW tail register indicating the presence of
882          * new descriptors.
883          */
884         writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
885
886         while (qi->desc_status[wait_index] != QI_DONE) {
887                 /*
888                  * We will leave the interrupts disabled, to prevent the interrupt
889                  * context from queueing another cmd while a cmd is already submitted
890                  * and waiting for completion on this cpu. This avoids a deadlock
891                  * where the interrupt context could wait indefinitely
892                  * for free slots in the queue.
893                  */
894                 rc = qi_check_fault(iommu, index);
895                 if (rc)
896                         break;
897
898                 spin_unlock(&qi->q_lock);
899                 cpu_relax();
900                 spin_lock(&qi->q_lock);
901         }
902
903         qi->desc_status[index] = QI_DONE;
904
905         reclaim_free_desc(qi);
906         spin_unlock_irqrestore(&qi->q_lock, flags);
907
908         if (rc == -EAGAIN)
909                 goto restart;
910
911         return rc;
912 }
913
914 /*
915  * Flush the global interrupt entry cache.
916  */
917 void qi_global_iec(struct intel_iommu *iommu)
918 {
919         struct qi_desc desc;
920
921         desc.low = QI_IEC_TYPE;
922         desc.high = 0;
923
924         /* should never fail */
925         qi_submit_sync(&desc, iommu);
926 }
927
928 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
929                       u64 type)
930 {
931         struct qi_desc desc;
932
933         desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
934                         | QI_CC_GRAN(type) | QI_CC_TYPE;
935         desc.high = 0;
936
937         qi_submit_sync(&desc, iommu);
938 }
939
940 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
941                     unsigned int size_order, u64 type)
942 {
943         u8 dw = 0, dr = 0;
944
945         struct qi_desc desc;
946         int ih = 0;
947
948         if (cap_write_drain(iommu->cap))
949                 dw = 1;
950
951         if (cap_read_drain(iommu->cap))
952                 dr = 1;
953
954         desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
955                 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
956         desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
957                 | QI_IOTLB_AM(size_order);
958
959         qi_submit_sync(&desc, iommu);
960 }
961
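/*
 * Issue a device-IOTLB invalidation.  @mask gives the size of the
 * region as a power-of-two number of pages; the low bits of the address
 * are filled in and the size bit set as the descriptor format requires.
 */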
962 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
963                         u64 addr, unsigned mask)
964 {
965         struct qi_desc desc;
966
967         if (mask) {
968                 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
969                 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
970                 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
971         } else
972                 desc.high = QI_DEV_IOTLB_ADDR(addr);
973
974         if (qdep >= QI_DEV_IOTLB_MAX_INVS)
975                 qdep = 0;
976
977         desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
978                    QI_DIOTLB_TYPE;
979
980         qi_submit_sync(&desc, iommu);
981 }
982
983 /*
984  * Disable Queued Invalidation interface.
985  */
986 void dmar_disable_qi(struct intel_iommu *iommu)
987 {
988         unsigned long flags;
989         u32 sts;
990         cycles_t start_time = get_cycles();
991
992         if (!ecap_qis(iommu->ecap))
993                 return;
994
995         spin_lock_irqsave(&iommu->register_lock, flags);
996
997         sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
998         if (!(sts & DMA_GSTS_QIES))
999                 goto end;
1000
1001         /*
1002          * Give the HW a chance to complete the pending invalidation requests.
1003          */
1004         while ((readl(iommu->reg + DMAR_IQT_REG) !=
1005                 readl(iommu->reg + DMAR_IQH_REG)) &&
1006                 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1007                 cpu_relax();
1008
1009         iommu->gcmd &= ~DMA_GCMD_QIE;
1010         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1011
1012         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1013                       !(sts & DMA_GSTS_QIES), sts);
1014 end:
1015         spin_unlock_irqrestore(&iommu->register_lock, flags);
1016 }
1017
1018 /*
1019  * Enable queued invalidation.
1020  */
1021 static void __dmar_enable_qi(struct intel_iommu *iommu)
1022 {
1023         u32 sts;
1024         unsigned long flags;
1025         struct q_inval *qi = iommu->qi;
1026
1027         qi->free_head = qi->free_tail = 0;
1028         qi->free_cnt = QI_LENGTH;
1029
1030         spin_lock_irqsave(&iommu->register_lock, flags);
1031
1032         /* write zero to the tail reg */
1033         writel(0, iommu->reg + DMAR_IQT_REG);
1034
1035         dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1036
1037         iommu->gcmd |= DMA_GCMD_QIE;
1038         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1039
1040         /* Make sure hardware complete it */
1041         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1042
1043         spin_unlock_irqrestore(&iommu->register_lock, flags);
1044 }
1045
1046 /*
1047  * Enable the Queued Invalidation interface. This is required to support
1048  * interrupt-remapping. It is also used by DMA-remapping, where it replaces
1049  * register-based IOTLB invalidation.
1050  */
1051 int dmar_enable_qi(struct intel_iommu *iommu)
1052 {
1053         struct q_inval *qi;
1054         struct page *desc_page;
1055
1056         if (!ecap_qis(iommu->ecap))
1057                 return -ENOENT;
1058
1059         /*
1060          * queued invalidation is already set up and enabled.
1061          */
1062         if (iommu->qi)
1063                 return 0;
1064
1065         iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1066         if (!iommu->qi)
1067                 return -ENOMEM;
1068
1069         qi = iommu->qi;
1070
1071
1072         desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1073         if (!desc_page) {
1074                 kfree(qi);
1075                 iommu->qi = NULL;
1076                 return -ENOMEM;
1077         }
1078
1079         qi->desc = page_address(desc_page);
1080
1081         qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1082         if (!qi->desc_status) {
1083                 free_page((unsigned long) qi->desc);
1084                 kfree(qi);
1085                 iommu->qi = NULL;
1086                 return -ENOMEM;
1087         }
1088
1089         qi->free_head = qi->free_tail = 0;
1090         qi->free_cnt = QI_LENGTH;
1091
1092         spin_lock_init(&qi->q_lock);
1093
1094         __dmar_enable_qi(iommu);
1095
1096         return 0;
1097 }
1098
1099 /* iommu interrupt handling. Most of it is MSI-like. */
1100
1101 enum faulttype {
1102         DMA_REMAP,
1103         INTR_REMAP,
1104         UNKNOWN,
1105 };
1106
1107 static const char *dma_remap_fault_reasons[] =
1108 {
1109         "Software",
1110         "Present bit in root entry is clear",
1111         "Present bit in context entry is clear",
1112         "Invalid context entry",
1113         "Access beyond MGAW",
1114         "PTE Write access is not set",
1115         "PTE Read access is not set",
1116         "Next page table ptr is invalid",
1117         "Root table address invalid",
1118         "Context table ptr is invalid",
1119         "non-zero reserved fields in RTP",
1120         "non-zero reserved fields in CTP",
1121         "non-zero reserved fields in PTE",
1122 };
1123
1124 static const char *intr_remap_fault_reasons[] =
1125 {
1126         "Detected reserved fields in the decoded interrupt-remapped request",
1127         "Interrupt index exceeded the interrupt-remapping table size",
1128         "Present field in the IRTE entry is clear",
1129         "Error accessing interrupt-remapping table pointed by IRTA_REG",
1130         "Detected reserved fields in the IRTE entry",
1131         "Blocked a compatibility format interrupt request",
1132         "Blocked an interrupt request due to source-id verification failure",
1133 };
1134
1135 #define MAX_FAULT_REASON_IDX    (ARRAY_SIZE(dma_remap_fault_reasons) - 1)
1136
1137 const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1138 {
1139         if (fault_reason >= 0x20 && (fault_reason < 0x20 +
1140                                      ARRAY_SIZE(intr_remap_fault_reasons))) {
1141                 *fault_type = INTR_REMAP;
1142                 return intr_remap_fault_reasons[fault_reason - 0x20];
1143         } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1144                 *fault_type = DMA_REMAP;
1145                 return dma_remap_fault_reasons[fault_reason];
1146         } else {
1147                 *fault_type = UNKNOWN;
1148                 return "Unknown";
1149         }
1150 }
1151
1152 void dmar_msi_unmask(unsigned int irq)
1153 {
1154         struct intel_iommu *iommu = get_irq_data(irq);
1155         unsigned long flag;
1156
1157         /* unmask it */
1158         spin_lock_irqsave(&iommu->register_lock, flag);
1159         writel(0, iommu->reg + DMAR_FECTL_REG);
1160         /* Read a reg to force flush the post write */
1161         readl(iommu->reg + DMAR_FECTL_REG);
1162         spin_unlock_irqrestore(&iommu->register_lock, flag);
1163 }
1164
1165 void dmar_msi_mask(unsigned int irq)
1166 {
1167         unsigned long flag;
1168         struct intel_iommu *iommu = get_irq_data(irq);
1169
1170         /* mask it */
1171         spin_lock_irqsave(&iommu->register_lock, flag);
1172         writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1173         /* Read a reg to force flush the post write */
1174         readl(iommu->reg + DMAR_FECTL_REG);
1175         spin_unlock_irqrestore(&iommu->register_lock, flag);
1176 }
1177
1178 void dmar_msi_write(int irq, struct msi_msg *msg)
1179 {
1180         struct intel_iommu *iommu = get_irq_data(irq);
1181         unsigned long flag;
1182
1183         spin_lock_irqsave(&iommu->register_lock, flag);
1184         writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1185         writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1186         writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1187         spin_unlock_irqrestore(&iommu->register_lock, flag);
1188 }
1189
1190 void dmar_msi_read(int irq, struct msi_msg *msg)
1191 {
1192         struct intel_iommu *iommu = get_irq_data(irq);
1193         unsigned long flag;
1194
1195         spin_lock_irqsave(&iommu->register_lock, flag);
1196         msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1197         msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1198         msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1199         spin_unlock_irqrestore(&iommu->register_lock, flag);
1200 }
1201
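/* Decode and log a single fault record. */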
1202 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1203                 u8 fault_reason, u16 source_id, unsigned long long addr)
1204 {
1205         const char *reason;
1206         int fault_type;
1207
1208         reason = dmar_get_fault_reason(fault_reason, &fault_type);
1209
1210         if (fault_type == INTR_REMAP)
1211                 printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
1212                        "fault index %llx\n"
1213                         "INTR-REMAP:[fault reason %02d] %s\n",
1214                         (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1215                         PCI_FUNC(source_id & 0xFF), addr >> 48,
1216                         fault_reason, reason);
1217         else
1218                 printk(KERN_ERR
1219                        "DMAR:[%s] Request device [%02x:%02x.%d] "
1220                        "fault addr %llx \n"
1221                        "DMAR:[fault reason %02d] %s\n",
1222                        (type ? "DMA Read" : "DMA Write"),
1223                        (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1224                        PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1225         return 0;
1226 }
1227
1228 #define PRIMARY_FAULT_REG_LEN (16)
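/*
 * Fault interrupt handler: walk the primary fault recording registers,
 * log and clear each pending fault, then clear the remaining bits in
 * the fault status register.
 */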
1229 irqreturn_t dmar_fault(int irq, void *dev_id)
1230 {
1231         struct intel_iommu *iommu = dev_id;
1232         int reg, fault_index;
1233         u32 fault_status;
1234         unsigned long flag;
1235
1236         spin_lock_irqsave(&iommu->register_lock, flag);
1237         fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1238         if (fault_status)
1239                 printk(KERN_ERR "DRHD: handling fault status reg %x\n",
1240                        fault_status);
1241
1242         /* TBD: ignore advanced fault log currently */
1243         if (!(fault_status & DMA_FSTS_PPF))
1244                 goto clear_rest;
1245
1246         fault_index = dma_fsts_fault_record_index(fault_status);
1247         reg = cap_fault_reg_offset(iommu->cap);
1248         while (1) {
1249                 u8 fault_reason;
1250                 u16 source_id;
1251                 u64 guest_addr;
1252                 int type;
1253                 u32 data;
1254
1255                 /* highest 32 bits */
1256                 data = readl(iommu->reg + reg +
1257                                 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1258                 if (!(data & DMA_FRCD_F))
1259                         break;
1260
1261                 fault_reason = dma_frcd_fault_reason(data);
1262                 type = dma_frcd_type(data);
1263
1264                 data = readl(iommu->reg + reg +
1265                                 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1266                 source_id = dma_frcd_source_id(data);
1267
1268                 guest_addr = dmar_readq(iommu->reg + reg +
1269                                 fault_index * PRIMARY_FAULT_REG_LEN);
1270                 guest_addr = dma_frcd_page_addr(guest_addr);
1271                 /* clear the fault */
1272                 writel(DMA_FRCD_F, iommu->reg + reg +
1273                         fault_index * PRIMARY_FAULT_REG_LEN + 12);
1274
1275                 spin_unlock_irqrestore(&iommu->register_lock, flag);
1276
1277                 dmar_fault_do_one(iommu, type, fault_reason,
1278                                 source_id, guest_addr);
1279
1280                 fault_index++;
1281                 if (fault_index >= cap_num_fault_regs(iommu->cap))
1282                         fault_index = 0;
1283                 spin_lock_irqsave(&iommu->register_lock, flag);
1284         }
1285 clear_rest:
1286         /* clear all the other faults */
1287         fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1288         writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1289
1290         spin_unlock_irqrestore(&iommu->register_lock, flag);
1291         return IRQ_HANDLED;
1292 }
1293
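/*
 * Allocate an irq for the IOMMU's fault-reporting MSI, program it via
 * arch_setup_dmar_msi(), and install dmar_fault() as the handler.
 */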
1294 int dmar_set_interrupt(struct intel_iommu *iommu)
1295 {
1296         int irq, ret;
1297
1298         /*
1299          * Check if the fault interrupt is already initialized.
1300          */
1301         if (iommu->irq)
1302                 return 0;
1303
1304         irq = create_irq();
1305         if (!irq) {
1306                 printk(KERN_ERR "IOMMU: no free vectors\n");
1307                 return -EINVAL;
1308         }
1309
1310         set_irq_data(irq, iommu);
1311         iommu->irq = irq;
1312
1313         ret = arch_setup_dmar_msi(irq);
1314         if (ret) {
1315                 set_irq_data(irq, NULL);
1316                 iommu->irq = 0;
1317                 destroy_irq(irq);
1318                 return ret;
1319         }
1320
1321         ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
1322         if (ret)
1323                 printk(KERN_ERR "IOMMU: can't request irq\n");
1324         return ret;
1325 }
1326
1327 int __init enable_drhd_fault_handling(void)
1328 {
1329         struct dmar_drhd_unit *drhd;
1330
1331         /*
1332          * Enable fault control interrupt.
1333          */
1334         for_each_drhd_unit(drhd) {
1335                 int ret;
1336                 struct intel_iommu *iommu = drhd->iommu;
1337                 ret = dmar_set_interrupt(iommu);
1338
1339                 if (ret) {
1340                         printk(KERN_ERR "DRHD %Lx: failed to enable fault "
1341                                "interrupt, ret %d\n",
1342                                (unsigned long long)drhd->reg_base_addr, ret);
1343                         return -1;
1344                 }
1345         }
1346
1347         return 0;
1348 }
1349
1350 /*
1351  * Re-enable Queued Invalidation interface.
1352  */
1353 int dmar_reenable_qi(struct intel_iommu *iommu)
1354 {
1355         if (!ecap_qis(iommu->ecap))
1356                 return -ENOENT;
1357
1358         if (!iommu->qi)
1359                 return -ENOENT;
1360
1361         /*
1362          * First disable queued invalidation.
1363          */
1364         dmar_disable_qi(iommu);
1365         /*
1366          * Then enable queued invalidation again. Since there is no pending
1367          * invalidation requests now, it's safe to re-enable queued
1368          * invalidation.
1369          */
1370         __dmar_enable_qi(iommu);
1371
1372         return 0;
1373 }
1374
1375 /*
1376  * Check interrupt remapping support in DMAR table description.
1377  */
1378 int dmar_ir_support(void)
1379 {
1380         struct acpi_table_dmar *dmar;
1381         dmar = (struct acpi_table_dmar *)dmar_tbl;
1382         return dmar->flags & 0x1;
1383 }