/* drivers/pci/intr_remapping.c */
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include "intel-iommu.h"
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8  irte_mask;
};

#ifdef CONFIG_HAVE_SPARSE_IRQ
static struct irq_2_iommu *irq_2_iommuX;
/* fill one page ? */
static int nr_irq_2_iommu = 0x100;
static int irq_2_iommu_index;
DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irq_2_iommu, PAGE_SIZE, NULL);

extern void *__alloc_bootmem_nopanic(unsigned long size,
                                     unsigned long align,
                                     unsigned long goal);

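/*
 * Hand out the next free entry from the pre-allocated irq_2_iommu pool,
 * growing the pool (from kzalloc or bootmem, depending on how far boot
 * has progressed) when it runs out.
 */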
static struct irq_2_iommu *get_one_free_irq_2_iommu(int not_used)
{
        struct irq_2_iommu *iommu;
        unsigned long total_bytes;

        if (irq_2_iommu_index >= nr_irq_2_iommu) {
                /*
                 * We ran out of pre-allocated entries, allocate more.
                 */
                printk(KERN_DEBUG "try to get more irq_2_iommu %d\n", nr_irq_2_iommu);

                total_bytes = sizeof(struct irq_2_iommu)*nr_irq_2_iommu;

                if (after_bootmem)
                        iommu = kzalloc(total_bytes, GFP_ATOMIC);
                else
                        iommu = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);

                if (!iommu)
                        panic("cannot get more irq_2_iommu\n");

                irq_2_iommuX = iommu;
                irq_2_iommu_index = 0;
        }

        iommu = &irq_2_iommuX[irq_2_iommu_index];
        irq_2_iommu_index++;
        return iommu;
}

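/* Look up the irq_2_iommu mapping hanging off the irq descriptor. */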
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);

        BUG_ON(!desc);

        return desc->irq_2_iommu;
}

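/*
 * Return the irq_2_iommu mapping for @irq, allocating the irq descriptor
 * and the mapping itself if they do not exist yet.
 */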
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        struct irq_desc *desc;
        struct irq_2_iommu *irq_iommu;

        /*
         * alloc irq desc if not allocated already.
         */
        desc = irq_to_desc_alloc(irq);

        irq_iommu = desc->irq_2_iommu;

        if (!irq_iommu)
                desc->irq_2_iommu = get_one_free_irq_2_iommu(irq);

        return desc->irq_2_iommu;
}

#else /* !CONFIG_HAVE_SPARSE_IRQ */

#ifdef CONFIG_HAVE_DYN_ARRAY
static struct irq_2_iommu *irq_2_iommuX;
DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
#else
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
#endif

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

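/*
 * Return the irq_2_iommu mapping for @irq only if it exists and is
 * bound to an IOMMU, otherwise NULL.
 */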
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);

        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

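/* Copy the IRTE currently programmed for @irq into @entry. */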
int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;

        if (!entry)
                return -1;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}

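/*
 * Allocate @count consecutive IRTEs (rounded up to a power of two) in the
 * interrupt-remapping table of @iommu and bind them to @irq.  Returns the
 * index of the first entry, or -1 on failure.
 */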
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        int i;

        if (!count)
                return -1;

#ifndef CONFIG_HAVE_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
#endif

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock(&irq_2_ir_lock);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock(&irq_2_ir_lock);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock(&irq_2_ir_lock);

        return index;
}

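/*
 * Queue a selective invalidation of the interrupt entry cache for @index
 * (plus the entries covered by @mask) and wait for it to complete.
 */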
static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

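/*
 * Return the base IRTE index for @irq and report its sub_handle through
 * @sub_handle.
 */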
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock(&irq_2_ir_lock);
        return index;
}

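/*
 * Bind @irq to the IRTE at @index + @subhandle in the remapping table
 * of @iommu.
 */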
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);

        irq_iommu = irq_2_iommu_alloc(irq);

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

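/* Drop the IRTE binding of @irq without touching the table itself. */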
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

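/*
 * Rewrite the low half of the IRTE backing @irq from @irte_modified and
 * invalidate the interrupt entry cache entry for it.
 */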
int modify_irte(int irq, struct irte *irte_modified)
{
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        qi_flush_iec(iommu, index, 0);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}

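/* Invalidate the interrupt entry cache for the IRTEs backing @irq. */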
int flush_irte(int irq)
{
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock(&irq_2_ir_lock);

        return 0;
}

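/* Find the interrupt-remapping unit that covers the IO-APIC @apic. */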
struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

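/* Find the interrupt-remapping unit of the DRHD that covers @dev. */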
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

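/*
 * Release the IRTEs used by @irq: the base user (sub_handle == 0) clears
 * the table entries and flushes the interrupt entry cache; sub-handle
 * users just drop their reference.
 */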
int free_irte(int irq)
{
        int index, i;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        if (!irq_iommu->sub_handle) {
                /* clear every IRTE in the allocated range, not just the first */
                for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
                        set_64bit((unsigned long *)(irte + i), 0);
                qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

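/*
 * Program the interrupt-remapping table address into the hardware,
 * globally invalidate the interrupt entry cache and then turn on
 * interrupt-remapping for @iommu.
 */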
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 cmd, sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        cmd = iommu->gcmd | DMA_GCMD_SIRTP;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        cmd = iommu->gcmd | DMA_GCMD_IRE;
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

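/*
 * Allocate the interrupt-remapping table for @iommu and enable
 * interrupt-remapping on it.
 */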
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_KERNEL);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

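/*
 * Enable interrupt-remapping on every DRHD that supports it: check EIM
 * support when extended interrupt mode (@eim) is requested, enable queued
 * invalidation everywhere, then set up the per-IOMMU remapping tables.
 */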
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        /*
         * Check for interrupt-remapping support.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
                               "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHDs.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued "
                               "invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Set up interrupt-remapping for all the DRHDs now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

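/*
 * Walk the device scope entries of a DRHD and record which IO-APICs are
 * covered by @iommu.
 */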
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
                                 struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_ioapic[ir_ioapic_num].iommu = iommu;
                        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
                        ir_ioapic_num++;
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}