/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
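
/*
 * Illustrative list layout (not part of the driver): with two ordinary
 * DRHDs and one INCLUDE_ALL unit registered, the list ends up as
 *
 *   dmar_drhd_units -> DRHD B -> DRHD A -> DRHD (INCLUDE_ALL)
 *
 * so a linear scan tries the explicit device-scope units first and only
 * falls back to the catch-all entry at the very end (see
 * dmar_find_matched_drhd_unit() below).
 */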

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			PREFIX "Device scope bus [%d] not found\n",
			scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		"Device scope device [%04x:%02x:%02x.%02x] not found\n",
		segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
			pdev->subordinate) || (scope->entry_type ==
			ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			 pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
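
/*
 * Layout sketch of the entry parsed above (per the ACPI DMAR device
 * scope format): a fixed header is followed by a variable number of
 * PCI path elements, one (device, function) hop per bridge level:
 *
 *   struct acpi_dmar_device_scope   entry_type, length, ..., bus
 *   struct acpi_dmar_pci_path [0]   (dev, fn) on scope->bus
 *   struct acpi_dmar_pci_path [1]   (dev, fn) behind that bridge
 *   ...
 *
 * count is derived from scope->length, and the loop descends one
 * pci_dev per path element via pci_get_slot() and pdev->subordinate.
 */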

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif
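
/*
 * Matching sketch for dmar_find_matched_atsr_unit() above: starting at
 * the device's bus, parent bridges are walked upwards,
 *
 *   dev -> bridge -> ... -> PCIe root port
 *
 * and ATS is claimed only if the upstream root port appears in the
 * ATSR device scope (or the ATSR is INCLUDE_ALL for that segment).
 * A conventional PCI bridge on the way disqualifies the device.
 */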

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	/*
	 * Use rhsa->base_address here: drhd points past the end of the
	 * list once the loop above completes without a match.
	 */
	WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
	     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
	     rhsa->base_address,
	     dmi_get_system_info(DMI_BIOS_VENDOR),
	     dmi_get_system_info(DMI_BIOS_VERSION),
	     dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
			"DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
			"RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been
	 * created with a fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy SINIT saved in SinitMleData in the TXT heap (which is DMA
	 * protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
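
/*
 * Overall DMAR table layout consumed by parse_dmar_table() (per the
 * VT-d spec): a fixed ACPI header followed by variable-length
 * remapping structures, dispatched on entry_header->type above:
 *
 *   struct acpi_table_dmar        width (haw), flags, ...
 *   struct acpi_dmar_header #0    DRHD / RMRR / ATSR / RHSA
 *   struct acpi_dmar_header #1    ...
 *
 * entry_header->length advances the cursor, which is why a 0-length
 * structure must be rejected to avoid looping forever.
 */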

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
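
/*
 * Usage sketch (hypothetical caller): the upward walk means a device
 * behind a listed bridge also matches, e.g.
 *
 *   if (dmar_pci_device_match(dmaru->devices, dmaru->devices_cnt, pdev))
 *           return dmaru;
 *
 * succeeds when pdev itself, or any bridge between it and the root
 * bus, is in the device scope array.
 */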

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
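
/*
 * Typical lookup flow (illustrative): callers resolve the hardware
 * unit for a device before programming translation for it, e.g.
 *
 *   struct dmar_drhd_unit *dmaru = dmar_find_matched_drhd_unit(pdev);
 *   struct intel_iommu *iommu = dmaru ? dmaru->iommu : NULL;
 *
 * Explicit device-scope matches are found before the INCLUDE_ALL unit
 * only because dmar_register_drhd_unit() keeps INCLUDE_ALL at the tail.
 */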

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

static int bios_warned;

int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
				bios_warned = 1;
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk(KERN_ERR "IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				      drhd->address,
				      dmi_get_system_info(DMI_BIOS_VENDOR),
				      dmi_get_system_info(DMI_BIOS_VERSION),
				      dmi_get_system_info(DMI_PRODUCT_VERSION));
				bios_warned = 1;
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable DMA-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		if (!bios_warned) {
			WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
			     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			     dmi_get_system_info(DMI_BIOS_VENDOR),
			     dmi_get_system_info(DMI_BIOS_VERSION),
			     dmi_get_system_info(DMI_PRODUCT_VERSION));
			bios_warned = 1;
		}
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		if (!bios_warned) {
			/* Promote an attitude of violence to a BIOS engineer today */
			WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
			     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			     drhd->reg_base_addr,
			     dmi_get_system_info(DMI_BIOS_VENDOR),
			     dmi_get_system_info(DMI_BIOS_VERSION),
			     dmi_get_system_info(DMI_PRODUCT_VERSION));
			bios_warned = 1;
		}
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
			iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
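
/*
 * Ring bookkeeping sketch: desc_status[] shadows the QI_LENGTH-entry
 * descriptor ring. reclaim_free_desc() walks from free_tail over
 * contiguous QI_DONE/QI_ABORT slots,
 *
 *   [FREE][FREE][DONE][ABORT][IN_USE]...
 *               ^free_tail            ^free_head
 *
 * turning each into QI_FREE and growing free_cnt, and stops at the
 * first slot that is still in use.
 */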

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the
		 * queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
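
/*
 * Submission sketch: each qi_submit_sync() call consumes two ring
 * slots, the caller's descriptor plus a wait descriptor whose status
 * write-back lets completion be polled from memory instead of MMIO:
 *
 *   hw[index]          caller's invalidation descriptor
 *   hw[index + 1]      QI_IWD_* wait descriptor -> desc_status[]
 *
 * qi_global_iec() below is the simplest caller: one interrupt entry
 * cache descriptor submitted synchronously.
 */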

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
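
/*
 * Size encoding sketch for the device-IOTLB invalidate above: a
 * non-zero mask requests 2^mask pages. The OR sets the address bits
 * below bit (VTD_PAGE_SHIFT + mask - 1) to 1 while leaving that bit 0
 * (the BUG_ON guarantees the address is aligned), e.g. for mask == 2
 * (four 4K pages):
 *
 *   aligned addr: ... a a 0 0 | 0000 0000 0000   (12-bit page offset)
 *   encoded:      ... a a 0 1 | 1111 1111 1111
 *
 * The position of the lowest zero bit tells the device the extent of
 * the invalidation.
 */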

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
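
/*
 * Bring-up sketch (hypothetical caller): queued invalidation is
 * enabled once per IOMMU before it is relied upon, e.g.
 *
 *   if (dmar_enable_qi(iommu))
 *           printk(KERN_ERR
 *                  "IOMMU: failed to enable queued invalidation\n");
 *
 * The ring is one zeroed page holding QI_LENGTH descriptors; IQA
 * points the hardware at it and the QIE bit in GCMD switches the
 * interface on (see __dmar_enable_qi() above).
 */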

/* iommu interrupt handling. Most stuff are MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
			"INTR-REMAP:[fault reason %02d] %s\n",
			(source_id >> 8), PCI_SLOT(source_id & 0xFF),
			PCI_FUNC(source_id & 0xFF), addr >> 48,
			fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
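
/*
 * Primary fault record layout walked by dmar_fault() above: records
 * are 16 bytes each, starting at cap_fault_reg_offset() from the
 * register base:
 *
 *   offset  0: 64-bit faulting page address (dma_frcd_page_addr)
 *   offset  8: 32-bit word carrying the source-id (dma_frcd_source_id)
 *   offset 12: 32-bit word with the F bit, type and fault reason
 *
 * Writing DMA_FRCD_F back to offset 12 clears the record, and the
 * index wraps at cap_num_fault_regs().
 */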

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}