genirq: remove sparse irq code
drivers/pci/intr_remapping.c
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include "intel-iommu.h"
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

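/*
 * Per-IRQ bookkeeping: which IOMMU remaps this interrupt, the base index
 * of its interrupt remap table entry (IRTE), the sub-handle used when
 * several interrupts share one allocation, and the mask (log2 of the
 * allocation size) used when flushing the interrupt entry cache.
 */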
struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8  irte_mask;
};

#ifdef CONFIG_HAVE_DYN_ARRAY
static struct irq_2_iommu *irq_2_iommuX;
DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
#else
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
#endif

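/*
 * With the flat irq_2_iommuX[] table above, lookup and "alloc" are the
 * same thing: both return the slot for @irq, or NULL when @irq is out of
 * range.  The separate alloc helper keeps the call sites stable should
 * entries ever be allocated on demand instead.
 */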
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}

static DEFINE_SPINLOCK(irq_2_ir_lock);

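/*
 * Return the irq_2_iommu slot for @irq, but only if an IOMMU has already
 * been bound to it; the lookup paths below use this to reject unmapped IRQs.
 */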
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);

        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

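/*
 * Copy the IRTE currently backing @irq into @entry.  Returns 0 on success,
 * -1 if @entry is NULL or @irq has no remapping set up.
 */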
int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;

        if (!entry)
                return -1;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}

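/*
 * Allocate @count IRTEs for @irq from @iommu's remap table.  @count is
 * rounded up to a power of two and the table is scanned for a contiguous
 * run of free entries, which are then marked present and recorded in the
 * irq_2_iommu slot for @irq.  Returns the base index, or -1 on failure.
 *
 * Illustrative caller (a sketch, not code from this file; prepare_irte()
 * is a hypothetical helper that fills an IRTE for a vector/destination):
 *
 *      index = alloc_irte(iommu, irq, 1);
 *      if (index >= 0) {
 *              prepare_irte(&irte, vector, dest);
 *              modify_irte(irq, &irte);
 *      }
 */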
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        int i;

        if (!count)
                return -1;

        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock(&irq_2_ir_lock);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock(&irq_2_ir_lock);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock(&irq_2_ir_lock);

        return index;
}

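/*
 * Queue a selective Interrupt Entry Cache invalidation for the 2^mask
 * IRTEs starting at @index and wait for the hardware to complete it.
 */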
static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

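/*
 * Return the IRTE base index for @irq and report its sub-handle through
 * @sub_handle; -1 if @irq is not remapped.
 */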
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock(&irq_2_ir_lock);
        return index;
}

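/*
 * Bind @irq to entry @index + @subhandle of @iommu's remap table without
 * allocating anything new, typically so that several interrupts can share
 * one block of IRTEs allocated by alloc_irte().
 */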
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);

        irq_iommu = irq_2_iommu_alloc(irq);

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

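/*
 * Undo set_irte_irq(): drop @irq's binding to its IRTE.  The table entries
 * themselves are left untouched.
 */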
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

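/*
 * Rewrite the low 64 bits of @irq's IRTE from @irte_modified, flush the
 * cache line holding it and invalidate the interrupt entry cache so the
 * hardware picks up the change.
 */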
int modify_irte(int irq, struct irte *irte_modified)
{
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        qi_flush_iec(iommu, index, 0);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}

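/* Invalidate the interrupt entry cache for every IRTE bound to @irq. */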
int flush_irte(int irq)
{
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock(&irq_2_ir_lock);

        return 0;
}

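/*
 * Map an IO-APIC id, or a PCI device, to the interrupt-remapping hardware
 * unit (DRHD/IOMMU) that covers it.
 */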
struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

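/*
 * Release @irq's IRTEs.  Only the owner of the allocation (sub_handle 0)
 * clears the table entries and flushes the interrupt entry cache; every
 * caller drops the irq_2_iommu binding.
 */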
int free_irte(int irq)
{
        int index, i;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        if (!irq_iommu->sub_handle) {
                for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
                        set_64bit((unsigned long *)irte, 0);
                qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

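/*
 * Program the hardware: point IRTA_REG at the remap table, latch it with
 * the SIRTP command, globally invalidate the interrupt entry cache, then
 * set IRE to turn interrupt-remapping on, waiting on the status register
 * after each command.
 */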
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 cmd, sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        cmd = iommu->gcmd | DMA_GCMD_SIRTP;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        cmd = iommu->gcmd | DMA_GCMD_IRE;
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

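/*
 * Allocate the ir_table descriptor and the zeroed remap table pages for
 * @iommu, then hand the table to the hardware via iommu_set_intr_remapping().
 */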
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_KERNEL);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

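/*
 * Boot-time enable sequence: check that every remap-capable DRHD also
 * supports EIM when x2APIC mode (@eim) is requested, enable queued
 * invalidation on all DRHDs, then set up and enable remapping on each
 * capable unit.
 */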
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued"
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

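/*
 * Walk the device scope entries of one DRHD and record, in ir_ioapic[],
 * each IO-APIC that this remapping unit covers.
 */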
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
                                 struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_ioapic[ir_ioapic_num].iommu = iommu;
                        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
                        ir_ioapic_num++;
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IO-APICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APIC's listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}