[PATCH] genirq: i386 irq: Remove the msi assumption that irq == vector
arch/i386/kernel/acpi/boot.c
1 /*
2  *  boot.c - Architecture-Specific Low-Level ACPI Boot Support
3  *
4  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
5  *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
6  *
7  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8  *
9  *  This program is free software; you can redistribute it and/or modify
10  *  it under the terms of the GNU General Public License as published by
11  *  the Free Software Foundation; either version 2 of the License, or
12  *  (at your option) any later version.
13  *
14  *  This program is distributed in the hope that it will be useful,
15  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *  GNU General Public License for more details.
18  *
19  *  You should have received a copy of the GNU General Public License
20  *  along with this program; if not, write to the Free Software
21  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
22  *
23  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24  */
25
26 #include <linux/init.h>
27 #include <linux/acpi.h>
28 #include <linux/efi.h>
29 #include <linux/cpumask.h>
30 #include <linux/module.h>
31 #include <linux/dmi.h>
32 #include <linux/irq.h>
33 #include <linux/bootmem.h>
34 #include <linux/ioport.h>
35
36 #include <asm/pgtable.h>
37 #include <asm/io_apic.h>
38 #include <asm/apic.h>
39 #include <asm/io.h>
40 #include <asm/mpspec.h>
41
42 static int __initdata acpi_force = 0;
43
44 #ifdef  CONFIG_ACPI
45 int acpi_disabled = 0;
46 #else
47 int acpi_disabled = 1;
48 #endif
49 EXPORT_SYMBOL(acpi_disabled);
50
51 #ifdef  CONFIG_X86_64
52
53 #include <asm/proto.h>
54
55 static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
56
57
58 #else                           /* X86 */
59
60 #ifdef  CONFIG_X86_LOCAL_APIC
61 #include <mach_apic.h>
62 #include <mach_mpparse.h>
63 #endif                          /* CONFIG_X86_LOCAL_APIC */
64
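/*
 * On i386 a GSI maps 1:1 onto a Linux IRQ number, so gsi_irq_sharing()
 * below is an identity stub; the CONFIG_X86_64 build provides its own
 * version elsewhere.
 */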
65 static inline int gsi_irq_sharing(int gsi) { return gsi; }
66
67 #endif                          /* X86 */
68
69 #define BAD_MADT_ENTRY(entry, end) (                                        \
70                 (!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
71                 ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
72
73 #define PREFIX                  "ACPI: "
74
75 int acpi_noirq __initdata;      /* skip ACPI IRQ initialization */
76 int acpi_pci_disabled __initdata;       /* skip ACPI PCI scan and IRQ initialization */
77 int acpi_ht __initdata = 1;     /* enable HT */
78
79 int acpi_lapic;
80 int acpi_ioapic;
81 int acpi_strict;
82 EXPORT_SYMBOL(acpi_strict);
83
84 acpi_interrupt_flags acpi_sci_flags __initdata;
85 int acpi_sci_override_gsi __initdata;
86 int acpi_skip_timer_override __initdata;
87
88 #ifdef CONFIG_X86_LOCAL_APIC
89 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
90 #endif
91
92 #ifndef __HAVE_ARCH_CMPXCHG
93 #warning ACPI uses CMPXCHG, i486 and later hardware
94 #endif
95
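/*
 * Boot-time map from ACPI processor id (as found in MADT LAPIC entries
 * and _MAT buffers) to local APIC id; 0xff marks an unused slot.
 */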
96 #define MAX_MADT_ENTRIES        256
97 u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
98     {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
99 EXPORT_SYMBOL(x86_acpiid_to_apicid);
100
101 /* --------------------------------------------------------------------------
102                               Boot-time Configuration
103    -------------------------------------------------------------------------- */
104
105 /*
106  * The default interrupt routing model is PIC (8259).  This gets
107  * overridden if IOAPICs are enumerated (below).
108  */
109 enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
110
111 #ifdef  CONFIG_X86_64
112
113 /* rely on all ACPI tables being in the direct mapping */
114 char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
115 {
116         if (!phys_addr || !size)
117                 return NULL;
118
119         if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
120                 return __va(phys_addr);
121
122         return NULL;
123 }
124
125 #else
126
127 /*
128  * Temporarily use the virtual area starting from FIX_ACPI_END to map
129  * the target physical address.  The problem is that set_fixmap()
130  * provides a single page, and it is possible that the page is not
131  * sufficient.  By using this area we can map up to
132  * FIX_ACPI_END - FIX_ACPI_BEGIN + 1 pages temporarily, i.e. until the
133  * next __acpi_map_table() call.
134  *
135  * Important Safety Note:  The fixmap page numbers are *subtracted* from
136  * the fixed base.  That's why we start at FIX_ACPI_END and count idx
137  * down while incrementing the phys address.
138  */
139 char *__acpi_map_table(unsigned long phys, unsigned long size)
140 {
141         unsigned long base, offset, mapped_size;
142         int idx;
143
144         if (phys + size < 8 * 1024 * 1024)
145                 return __va(phys);
146
147         offset = phys & (PAGE_SIZE - 1);
148         mapped_size = PAGE_SIZE - offset;
149         set_fixmap(FIX_ACPI_END, phys);
150         base = fix_to_virt(FIX_ACPI_END);
151
152         /*
153          * Most cases can be covered by the below.
154          */
155         idx = FIX_ACPI_END;
156         while (mapped_size < size) {
157                 if (--idx < FIX_ACPI_BEGIN)
158                         return NULL;    /* cannot handle this */
159                 phys += PAGE_SIZE;
160                 set_fixmap(idx, phys);
161                 mapped_size += PAGE_SIZE;
162         }
163
164         return ((unsigned char *)base + offset);
165 }
166 #endif
167
168 #ifdef CONFIG_PCI_MMCONFIG
169 /* The physical address of the MMCONFIG aperture.  Set from ACPI tables. */
170 struct acpi_table_mcfg_config *pci_mmcfg_config;
171 int pci_mmcfg_config_num;
172
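/*
 * Parse the MCFG table: count the config entries that follow the fixed
 * header, copy them into a kmalloc()ed pci_mmcfg_config[] array, and
 * reject any aperture above 4GB (base_reserved non-zero), which the
 * 32-bit MMCONFIG code cannot address.
 */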
173 int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
174 {
175         struct acpi_table_mcfg *mcfg;
176         unsigned long i;
177         int config_size;
178
179         if (!phys_addr || !size)
180                 return -EINVAL;
181
182         mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
183         if (!mcfg) {
184                 printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
185                 return -ENODEV;
186         }
187
188         /* how many config structures do we have */
189         pci_mmcfg_config_num = 0;
190         i = size - sizeof(struct acpi_table_mcfg);
191         while (i >= sizeof(struct acpi_table_mcfg_config)) {
192                 ++pci_mmcfg_config_num;
193                 i -= sizeof(struct acpi_table_mcfg_config);
194         }
195         if (pci_mmcfg_config_num == 0) {
196                 printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
197                 return -ENODEV;
198         }
199
200         config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
201         pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
202         if (!pci_mmcfg_config) {
203                 printk(KERN_WARNING PREFIX
204                        "No memory for MCFG config tables\n");
205                 return -ENOMEM;
206         }
207
208         memcpy(pci_mmcfg_config, &mcfg->config, config_size);
209         for (i = 0; i < pci_mmcfg_config_num; ++i) {
210                 if (mcfg->config[i].base_reserved) {
211                         printk(KERN_ERR PREFIX
212                                "MMCONFIG not in low 4GB of memory\n");
213                         kfree(pci_mmcfg_config);
214                         pci_mmcfg_config_num = 0;
215                         return -ENODEV;
216                 }
217         }
218
219         return 0;
220 }
221 #endif                          /* CONFIG_PCI_MMCONFIG */
222
223 #ifdef CONFIG_X86_LOCAL_APIC
224 static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
225 {
226         struct acpi_table_madt *madt = NULL;
227
228         if (!phys_addr || !size || !cpu_has_apic)
229                 return -EINVAL;
230
231         madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
232         if (!madt) {
233                 printk(KERN_WARNING PREFIX "Unable to map MADT\n");
234                 return -ENODEV;
235         }
236
237         if (madt->lapic_address) {
238                 acpi_lapic_addr = (u64) madt->lapic_address;
239
240                 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
241                        madt->lapic_address);
242         }
243
244         acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
245
246         return 0;
247 }
248
249 static int __init
250 acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
251 {
252         struct acpi_table_lapic *processor = NULL;
253
254         processor = (struct acpi_table_lapic *)header;
255
256         if (BAD_MADT_ENTRY(processor, end))
257                 return -EINVAL;
258
259         acpi_table_print_madt_entry(header);
260
261         /* Record local apic id only when enabled */
262         if (processor->flags.enabled)
263                 x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
264
265         /*
266          * We need to register disabled CPUs as well, so that they can
267          * be counted.  This lets us size cpus_possible_map more
268          * accurately, which in turn means we do not have to
269          * preallocate memory for all NR_CPUS when we use
270          * CPU hotplug.
271          */
272         mp_register_lapic(processor->id,        /* APIC ID */
273                           processor->flags.enabled);    /* Enabled? */
274
275         return 0;
276 }
277
278 static int __init
279 acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
280                           const unsigned long end)
281 {
282         struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
283
284         lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
285
286         if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
287                 return -EINVAL;
288
289         acpi_lapic_addr = lapic_addr_ovr->address;
290
291         return 0;
292 }
293
294 static int __init
295 acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
296 {
297         struct acpi_table_lapic_nmi *lapic_nmi = NULL;
298
299         lapic_nmi = (struct acpi_table_lapic_nmi *)header;
300
301         if (BAD_MADT_ENTRY(lapic_nmi, end))
302                 return -EINVAL;
303
304         acpi_table_print_madt_entry(header);
305
306         if (lapic_nmi->lint != 1)
307                 printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
308
309         return 0;
310 }
311
312 #endif                          /*CONFIG_X86_LOCAL_APIC */
313
314 #ifdef CONFIG_X86_IO_APIC
315
316 static int __init
317 acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
318 {
319         struct acpi_table_ioapic *ioapic = NULL;
320
321         ioapic = (struct acpi_table_ioapic *)header;
322
323         if (BAD_MADT_ENTRY(ioapic, end))
324                 return -EINVAL;
325
326         acpi_table_print_madt_entry(header);
327
328         mp_register_ioapic(ioapic->id,
329                            ioapic->address, ioapic->global_irq_base);
330
331         return 0;
332 }
333
334 /*
335  * Parse Interrupt Source Override for the ACPI SCI
336  */
337 static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
338 {
339         if (trigger == 0)       /* compatible SCI trigger is level */
340                 trigger = 3;
341
342         if (polarity == 0)      /* compatible SCI polarity is low */
343                 polarity = 3;
344
345         /* Command-line over-ride via acpi_sci= */
346         if (acpi_sci_flags.trigger)
347                 trigger = acpi_sci_flags.trigger;
348
349         if (acpi_sci_flags.polarity)
350                 polarity = acpi_sci_flags.polarity;
351
352         /*
353          * mp_config_acpi_legacy_irqs() has already set up IRQs < 16.
354          * If the GSI is < 16, this will update its flags,
355          * else it will create a new mp_irqs[] entry.
356          */
357         mp_override_legacy_irq(gsi, polarity, trigger, gsi);
358
359         /*
360          * stash over-ride to indicate we've been here
361          * and for later update of acpi_fadt
362          */
363         acpi_sci_override_gsi = gsi;
364         return;
365 }
366
367 static int __init
368 acpi_parse_int_src_ovr(acpi_table_entry_header * header,
369                        const unsigned long end)
370 {
371         struct acpi_table_int_src_ovr *intsrc = NULL;
372
373         intsrc = (struct acpi_table_int_src_ovr *)header;
374
375         if (BAD_MADT_ENTRY(intsrc, end))
376                 return -EINVAL;
377
378         acpi_table_print_madt_entry(header);
379
380         if (intsrc->bus_irq == acpi_fadt.sci_int) {
381                 acpi_sci_ioapic_setup(intsrc->global_irq,
382                                       intsrc->flags.polarity,
383                                       intsrc->flags.trigger);
384                 return 0;
385         }
386
387         if (acpi_skip_timer_override &&
388             intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
389                 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
390                 return 0;
391         }
392
393         mp_override_legacy_irq(intsrc->bus_irq,
394                                intsrc->flags.polarity,
395                                intsrc->flags.trigger, intsrc->global_irq);
396
397         return 0;
398 }
399
400 static int __init
401 acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
402 {
403         struct acpi_table_nmi_src *nmi_src = NULL;
404
405         nmi_src = (struct acpi_table_nmi_src *)header;
406
407         if (BAD_MADT_ENTRY(nmi_src, end))
408                 return -EINVAL;
409
410         acpi_table_print_madt_entry(header);
411
412         /* TBD: Support nmi_src entries? */
413
414         return 0;
415 }
416
417 #endif                          /* CONFIG_X86_IO_APIC */
418
419 /*
420  * acpi_pic_sci_set_trigger()
421  * 
422  * use ELCR to set PIC-mode trigger type for SCI
423  *
424  * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
425  * it may require Edge Trigger -- use "acpi_sci=edge"
426  *
427  * Ports 0x4d0-0x4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
428  * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
429  * ELCR1 covers IRQs 0-7 (IRQ 0, 1, 2 must be 0)
430  * ELCR2 covers IRQs 8-15 (IRQ 8, 13 must be 0)
431  */
432
433 void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
434 {
435         unsigned int mask = 1 << irq;
436         unsigned int old, new;
437
438         /* Read the old ELCR mask */
439         old = inb(0x4d0) | (inb(0x4d1) << 8);
440
441         /*
442          * If we use ACPI to set PCI irq's, then we should clear ELCR
443          * since we will set it correctly as we enable the PCI irq
444          * routing.
445          */
446         new = acpi_noirq ? old : 0;
447
448         /*
449          * Update SCI information in the ELCR, it isn't in the PCI
450          * routing tables..
451          */
452         switch (trigger) {
453         case 1:         /* Edge - clear */
454                 new &= ~mask;
455                 break;
456         case 3:         /* Level - set */
457                 new |= mask;
458                 break;
459         }
460
461         if (old == new)
462                 return;
463
464         printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
465         outb(new, 0x4d0);
466         outb(new >> 8, 0x4d1);
467 }
468
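/*
 * Translate a GSI to a Linux IRQ number; on i386 this is an identity
 * mapping (see gsi_irq_sharing() above).
 */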
469 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
470 {
471         *irq = gsi_irq_sharing(gsi);
472         return 0;
473 }
474
475 /*
476  * success: return IRQ number (>=0)
477  * failure: return < 0
478  */
479 int acpi_register_gsi(u32 gsi, int triggering, int polarity)
480 {
481         unsigned int irq;
482         unsigned int plat_gsi = gsi;
483
484 #ifdef CONFIG_PCI
485         /*
486          * Make sure all (legacy) PCI IRQs are set as level-triggered.
487          */
488         if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
489                 extern void eisa_set_level_irq(unsigned int irq);
490
491                 if (triggering == ACPI_LEVEL_SENSITIVE)
492                         eisa_set_level_irq(gsi);
493         }
494 #endif
495
496 #ifdef CONFIG_X86_IO_APIC
497         if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
498                 plat_gsi = mp_register_gsi(gsi, triggering, polarity);
499         }
500 #endif
501         acpi_gsi_to_irq(plat_gsi, &irq);
502         return irq;
503 }
504
505 EXPORT_SYMBOL(acpi_register_gsi);
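/*
 * Illustrative only (not part of this file): a driver that obtained a GSI
 * from an ACPI _CRS resource would typically map and request it along
 * these lines, where my_handler and my_dev are hypothetical names:
 *
 *	int irq = acpi_register_gsi(gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
 *	if (irq >= 0)
 *		request_irq(irq, my_handler, IRQF_SHARED, "my_dev", my_dev);
 */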
506
507 /*
508  *  ACPI based hotplug support for CPU
509  */
510 #ifdef CONFIG_ACPI_HOTPLUG_CPU
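/*
 * Map a hot-added processor object onto a logical CPU: evaluate its _MAT
 * method to obtain a MADT LAPIC entry, register the APIC id, and return
 * the logical CPU number that mp_register_lapic() allocated for it.
 */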
511 int acpi_map_lsapic(acpi_handle handle, int *pcpu)
512 {
513         struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
514         union acpi_object *obj;
515         struct acpi_table_lapic *lapic;
516         cpumask_t tmp_map, new_map;
517         u8 physid;
518         int cpu;
519
520         if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
521                 return -EINVAL;
522
523         if (!buffer.length || !buffer.pointer)
524                 return -EINVAL;
525
526         obj = buffer.pointer;
527         if (obj->type != ACPI_TYPE_BUFFER ||
528             obj->buffer.length < sizeof(*lapic)) {
529                 kfree(buffer.pointer);
530                 return -EINVAL;
531         }
532
533         lapic = (struct acpi_table_lapic *)obj->buffer.pointer;
534
535         if ((lapic->header.type != ACPI_MADT_LAPIC) ||
536             (!lapic->flags.enabled)) {
537                 kfree(buffer.pointer);
538                 return -EINVAL;
539         }
540
541         physid = lapic->id;
542
543         kfree(buffer.pointer);
544         buffer.length = ACPI_ALLOCATE_BUFFER;
545         buffer.pointer = NULL;
546
547         tmp_map = cpu_present_map;
548         mp_register_lapic(physid, lapic->flags.enabled);
549
550         /*
551          * If mp_register_lapic successfully generates a new logical cpu
552          * number, then the following will get us exactly what was mapped
553          */
554         cpus_andnot(new_map, cpu_present_map, tmp_map);
555         if (cpus_empty(new_map)) {
556                 printk("Unable to map lapic to logical cpu number\n");
557                 return -EINVAL;
558         }
559
560         cpu = first_cpu(new_map);
561
562         *pcpu = cpu;
563         return 0;
564 }
565
566 EXPORT_SYMBOL(acpi_map_lsapic);
567
568 int acpi_unmap_lsapic(int cpu)
569 {
570         int i;
571
572         for_each_possible_cpu(i) {
573                 if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) {
574                         x86_acpiid_to_apicid[i] = -1;
575                         break;
576                 }
577         }
578         x86_cpu_to_apicid[cpu] = -1;
579         cpu_clear(cpu, cpu_present_map);
580         num_processors--;
581
582         return (0);
583 }
584
585 EXPORT_SYMBOL(acpi_unmap_lsapic);
586 #endif                          /* CONFIG_ACPI_HOTPLUG_CPU */
587
588 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
589 {
590         /* TBD */
591         return -EINVAL;
592 }
593
594 EXPORT_SYMBOL(acpi_register_ioapic);
595
596 int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
597 {
598         /* TBD */
599         return -EINVAL;
600 }
601
602 EXPORT_SYMBOL(acpi_unregister_ioapic);
603
604 static unsigned long __init
605 acpi_scan_rsdp(unsigned long start, unsigned long length)
606 {
607         unsigned long offset = 0;
608         unsigned long sig_len = sizeof("RSD PTR ") - 1;
609
610         /*
611          * Scan all 16-byte boundaries of the physical memory region for the
612          * RSDP signature.
613          */
614         for (offset = 0; offset < length; offset += 16) {
615                 if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
616                         continue;
617                 return (start + offset);
618         }
619
620         return 0;
621 }
622
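/*
 * Simple Boot Flag table: all we need from it is the CMOS index of the
 * BOOT register, which is stashed in sbf_port for later use.
 */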
623 static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
624 {
625         struct acpi_table_sbf *sb;
626
627         if (!phys_addr || !size)
628                 return -EINVAL;
629
630         sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
631         if (!sb) {
632                 printk(KERN_WARNING PREFIX "Unable to map SBF\n");
633                 return -ENODEV;
634         }
635
636         sbf_port = sb->sbf_cmos;        /* Save CMOS port */
637
638         return 0;
639 }
640
641 #ifdef CONFIG_HPET_TIMER
642
643 static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
644 {
645         struct acpi_table_hpet *hpet_tbl;
646         struct resource *hpet_res;
647         resource_size_t res_start;
648
649         if (!phys || !size)
650                 return -EINVAL;
651
652         hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
653         if (!hpet_tbl) {
654                 printk(KERN_WARNING PREFIX "Unable to map HPET\n");
655                 return -ENODEV;
656         }
657
658         if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
659                 printk(KERN_WARNING PREFIX "HPET timers must be located in "
660                        "memory.\n");
661                 return -1;
662         }
663
664 #define HPET_RESOURCE_NAME_SIZE 9
665         hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
666         if (hpet_res) {
667                 memset(hpet_res, 0, sizeof(*hpet_res));
668                 hpet_res->name = (void *)&hpet_res[1];
669                 hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
670                 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE,
671                          "HPET %u", hpet_tbl->number);
672                 hpet_res->end = (1 * 1024) - 1;
673         }
674
675 #ifdef  CONFIG_X86_64
676         vxtime.hpet_address = hpet_tbl->addr.addrl |
677             ((long)hpet_tbl->addr.addrh << 32);
678
679         printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
680                hpet_tbl->id, vxtime.hpet_address);
681
682         res_start = vxtime.hpet_address;
683 #else                           /* X86 */
684         {
685                 extern unsigned long hpet_address;
686
687                 hpet_address = hpet_tbl->addr.addrl;
688                 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
689                        hpet_tbl->id, hpet_address);
690
691                 res_start = hpet_address;
692         }
693 #endif                          /* X86 */
694
695         if (hpet_res) {
696                 hpet_res->start = res_start;
697                 hpet_res->end += res_start;
698                 insert_resource(&iomem_resource, hpet_res);
699         }
700
701         return 0;
702 }
703 #else
704 #define acpi_parse_hpet NULL
705 #endif
706
707 #ifdef CONFIG_X86_PM_TIMER
708 extern u32 pmtmr_ioport;
709 #endif
710
711 static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
712 {
713         struct fadt_descriptor *fadt = NULL;
714
715         fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
716         if (!fadt) {
717                 printk(KERN_WARNING PREFIX "Unable to map FADT\n");
718                 return 0;
719         }
720         /* initialize sci_int early for INT_SRC_OVR MADT parsing */
721         acpi_fadt.sci_int = fadt->sci_int;
722
723         /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
724         acpi_fadt.revision = fadt->revision;
725         acpi_fadt.force_apic_physical_destination_mode =
726             fadt->force_apic_physical_destination_mode;
727
728 #ifdef CONFIG_X86_PM_TIMER
729         /* detect the location of the ACPI PM Timer */
730         if (fadt->revision >= FADT2_REVISION_ID) {
731                 /* FADT rev. 2 */
732                 if (fadt->xpm_tmr_blk.address_space_id !=
733                     ACPI_ADR_SPACE_SYSTEM_IO)
734                         return 0;
735
736                 pmtmr_ioport = fadt->xpm_tmr_blk.address;
737                 /*
738                  * "X" fields are optional extensions to the original V1.0
739                  * fields, so we must selectively expand V1.0 fields if the
740                  * corresponding X field is zero.
741                  */
742                 if (!pmtmr_ioport)
743                         pmtmr_ioport = fadt->V1_pm_tmr_blk;
744         } else {
745                 /* FADT rev. 1 */
746                 pmtmr_ioport = fadt->V1_pm_tmr_blk;
747         }
748         if (pmtmr_ioport)
749                 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
750                        pmtmr_ioport);
751 #endif
752         return 0;
753 }
754
755 unsigned long __init acpi_find_rsdp(void)
756 {
757         unsigned long rsdp_phys = 0;
758
759         if (efi_enabled) {
760                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
761                         return efi.acpi20;
762                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
763                         return efi.acpi;
764         }
765         /*
766          * Scan memory looking for the RSDP signature. First search EBDA (low
767          * memory) paragraphs and then search upper memory (E0000-FFFFF).
768          */
769         rsdp_phys = acpi_scan_rsdp(0, 0x400);
770         if (!rsdp_phys)
771                 rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
772
773         return rsdp_phys;
774 }
775
776 #ifdef  CONFIG_X86_LOCAL_APIC
777 /*
778  * Parse LAPIC entries in MADT
779  * returns 0 on success, < 0 on error
780  */
781 static int __init acpi_parse_madt_lapic_entries(void)
782 {
783         int count;
784
785         if (!cpu_has_apic)
786                 return -ENODEV;
787
788         /* 
789          * Note that the LAPIC address is obtained from the MADT (32-bit value)
790          * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
791          */
792
793         count =
794             acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
795                                   acpi_parse_lapic_addr_ovr, 0);
796         if (count < 0) {
797                 printk(KERN_ERR PREFIX
798                        "Error parsing LAPIC address override entry\n");
799                 return count;
800         }
801
802         mp_register_lapic_address(acpi_lapic_addr);
803
804         count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
805                                       MAX_APICS);
806         if (!count) {
807                 printk(KERN_ERR PREFIX "No LAPIC entries present\n");
808                 /* TBD: Cleanup to allow fallback to MPS */
809                 return -ENODEV;
810         } else if (count < 0) {
811                 printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
812                 /* TBD: Cleanup to allow fallback to MPS */
813                 return count;
814         }
815
816         count =
817             acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
818         if (count < 0) {
819                 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
820                 /* TBD: Cleanup to allow fallback to MPS */
821                 return count;
822         }
823         return 0;
824 }
825 #endif                          /* CONFIG_X86_LOCAL_APIC */
826
827 #ifdef  CONFIG_X86_IO_APIC
828 /*
829  * Parse IOAPIC related entries in MADT
830  * returns 0 on success, < 0 on error
831  */
832 static int __init acpi_parse_madt_ioapic_entries(void)
833 {
834         int count;
835
836         /*
837          * The ACPI interpreter is required to complete interrupt setup,
838          * so if it is off, don't enumerate the io-apics with ACPI.
839          * If MPS is present, it will handle them;
840          * otherwise the system will stay in PIC mode.
841          */
842         if (acpi_disabled || acpi_noirq) {
843                 return -ENODEV;
844         }
845
846         if (!cpu_has_apic) 
847                 return -ENODEV;
848
849         /*
850          * if "noapic" boot option, don't look for IO-APICs
851          */
852         if (skip_ioapic_setup) {
853                 printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
854                        "due to 'noapic' option.\n");
855                 return -ENODEV;
856         }
857
858         count =
859             acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
860                                   MAX_IO_APICS);
861         if (!count) {
862                 printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
863                 return -ENODEV;
864         } else if (count < 0) {
865                 printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
866                 return count;
867         }
868
869         count =
870             acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
871                                   NR_IRQ_VECTORS);
872         if (count < 0) {
873                 printk(KERN_ERR PREFIX
874                        "Error parsing interrupt source overrides entry\n");
875                 /* TBD: Cleanup to allow fallback to MPS */
876                 return count;
877         }
878
879         /*
880          * If BIOS did not supply an INT_SRC_OVR for the SCI
881          * pretend we got one so we can set the SCI flags.
882          */
883         if (!acpi_sci_override_gsi)
884                 acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
885
886         /* Fill in identity legacy mappings where there is no override */
887         mp_config_acpi_legacy_irqs();
888
889         count =
890             acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
891                                   NR_IRQ_VECTORS);
892         if (count < 0) {
893                 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
894                 /* TBD: Cleanup to allow fallback to MPS */
895                 return count;
896         }
897
898         return 0;
899 }
900 #else
901 static inline int acpi_parse_madt_ioapic_entries(void)
902 {
903         return -1;
904 }
905 #endif  /* !CONFIG_X86_IO_APIC */
906
907 static void __init acpi_process_madt(void)
908 {
909 #ifdef CONFIG_X86_LOCAL_APIC
910         int count, error;
911
912         count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
913         if (count >= 1) {
914
915                 /*
916                  * Parse MADT LAPIC entries
917                  */
918                 error = acpi_parse_madt_lapic_entries();
919                 if (!error) {
920                         acpi_lapic = 1;
921
922 #ifdef CONFIG_X86_GENERICARCH
923                         generic_bigsmp_probe();
924 #endif
925                         /*
926                          * Parse MADT IO-APIC entries
927                          */
928                         error = acpi_parse_madt_ioapic_entries();
929                         if (!error) {
930                                 acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
931                                 acpi_irq_balance_set(NULL);
932                                 acpi_ioapic = 1;
933
934                                 smp_found_config = 1;
935                                 clustered_apic_check();
936                         }
937                 }
938                 if (error == -EINVAL) {
939                         /*
940                          * Dell Precision Workstation 410, 610 come here.
941                          */
942                         printk(KERN_ERR PREFIX
943                                "Invalid BIOS MADT, disabling ACPI\n");
944                         disable_acpi();
945                 }
946         }
947 #endif
948         return;
949 }
950
951 #ifdef __i386__
952
953 static int __init disable_acpi_irq(struct dmi_system_id *d)
954 {
955         if (!acpi_force) {
956                 printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
957                        d->ident);
958                 acpi_noirq_set();
959         }
960         return 0;
961 }
962
963 static int __init disable_acpi_pci(struct dmi_system_id *d)
964 {
965         if (!acpi_force) {
966                 printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
967                        d->ident);
968                 acpi_disable_pci();
969         }
970         return 0;
971 }
972
973 static int __init dmi_disable_acpi(struct dmi_system_id *d)
974 {
975         if (!acpi_force) {
976                 printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
977                 disable_acpi();
978         } else {
979                 printk(KERN_NOTICE
980                        "Warning: DMI blacklist says broken, but acpi forced\n");
981         }
982         return 0;
983 }
984
985 /*
986  * Limit ACPI to CPU enumeration for HT
987  */
988 static int __init force_acpi_ht(struct dmi_system_id *d)
989 {
990         if (!acpi_force) {
991                 printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
992                        d->ident);
993                 disable_acpi();
994                 acpi_ht = 1;
995         } else {
996                 printk(KERN_NOTICE
997                        "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
998         }
999         return 0;
1000 }
1001
1002 /*
1003  * If your system is blacklisted here, but you find that acpi=force
1004  * works for you, please contact acpi-devel@sourceforge.net
1005  */
1006 static struct dmi_system_id __initdata acpi_dmi_table[] = {
1007         /*
1008          * Boxes that need ACPI disabled
1009          */
1010         {
1011          .callback = dmi_disable_acpi,
1012          .ident = "IBM Thinkpad",
1013          .matches = {
1014                      DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1015                      DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
1016                      },
1017          },
1018
1019         /*
1020          * Boxes that need acpi=ht
1021          */
1022         {
1023          .callback = force_acpi_ht,
1024          .ident = "FSC Primergy T850",
1025          .matches = {
1026                      DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
1027                      DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
1028                      },
1029          },
1030         {
1031          .callback = force_acpi_ht,
1032          .ident = "DELL GX240",
1033          .matches = {
1034                      DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
1035                      DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
1036                      },
1037          },
1038         {
1039          .callback = force_acpi_ht,
1040          .ident = "HP VISUALIZE NT Workstation",
1041          .matches = {
1042                      DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
1043                      DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
1044                      },
1045          },
1046         {
1047          .callback = force_acpi_ht,
1048          .ident = "Compaq Workstation W8000",
1049          .matches = {
1050                      DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
1051                      DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
1052                      },
1053          },
1054         {
1055          .callback = force_acpi_ht,
1056          .ident = "ASUS P4B266",
1057          .matches = {
1058                      DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1059                      DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
1060                      },
1061          },
1062         {
1063          .callback = force_acpi_ht,
1064          .ident = "ASUS P2B-DS",
1065          .matches = {
1066                      DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1067                      DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
1068                      },
1069          },
1070         {
1071          .callback = force_acpi_ht,
1072          .ident = "ASUS CUR-DLS",
1073          .matches = {
1074                      DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1075                      DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
1076                      },
1077          },
1078         {
1079          .callback = force_acpi_ht,
1080          .ident = "ABIT i440BX-W83977",
1081          .matches = {
1082                      DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
1083                      DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
1084                      },
1085          },
1086         {
1087          .callback = force_acpi_ht,
1088          .ident = "IBM Bladecenter",
1089          .matches = {
1090                      DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1091                      DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
1092                      },
1093          },
1094         {
1095          .callback = force_acpi_ht,
1096          .ident = "IBM eServer xSeries 360",
1097          .matches = {
1098                      DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1099                      DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
1100                      },
1101          },
1102         {
1103          .callback = force_acpi_ht,
1104          .ident = "IBM eserver xSeries 330",
1105          .matches = {
1106                      DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1107                      DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
1108                      },
1109          },
1110         {
1111          .callback = force_acpi_ht,
1112          .ident = "IBM eserver xSeries 440",
1113          .matches = {
1114                      DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1115                      DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
1116                      },
1117          },
1118
1119         /*
1120          * Boxes that need ACPI PCI IRQ routing disabled
1121          */
1122         {
1123          .callback = disable_acpi_irq,
1124          .ident = "ASUS A7V",
1125          .matches = {
1126                      DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
1127                      DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
1128                      /* newer BIOS, Revision 1011, does work */
1129                      DMI_MATCH(DMI_BIOS_VERSION,
1130                                "ASUS A7V ACPI BIOS Revision 1007"),
1131                      },
1132          },
1133
1134         /*
1135          * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
1136          */
1137         {                       /* _BBN 0 bug */
1138          .callback = disable_acpi_pci,
1139          .ident = "ASUS PR-DLS",
1140          .matches = {
1141                      DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1142                      DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
1143                      DMI_MATCH(DMI_BIOS_VERSION,
1144                                "ASUS PR-DLS ACPI BIOS Revision 1010"),
1145                      DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
1146                      },
1147          },
1148         {
1149          .callback = disable_acpi_pci,
1150          .ident = "Acer TravelMate 36x Laptop",
1151          .matches = {
1152                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1153                      DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
1154                      },
1155          },
1156         {}
1157 };
1158
1159 #endif                          /* __i386__ */
1160
1161 /*
1162  * acpi_boot_table_init() and acpi_boot_init()
1163  *  called from setup_arch(), always.
1164  *      1. checksums all tables
1165  *      2. enumerates lapics
1166  *      3. enumerates io-apics
1167  *
1168  * acpi_table_init() is separate to allow reading SRAT without
1169  * other side effects.
1170  *
1171  * side effects of acpi_boot_init:
1172  *      acpi_lapic = 1 if LAPIC found
1173  *      acpi_ioapic = 1 if IOAPIC found
1174  *      if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
1175  *      if acpi_blacklisted() acpi_disabled = 1;
1176  *      acpi_irq_model=...
1177  *      ...
1178  *
1179  * return value: (currently ignored)
1180  *      0: success
1181  *      !0: failure
1182  */
1183
1184 int __init acpi_boot_table_init(void)
1185 {
1186         int error;
1187
1188 #ifdef __i386__
1189         dmi_check_system(acpi_dmi_table);
1190 #endif
1191
1192         /*
1193          * If acpi_disabled, bail out
1194          * One exception: acpi=ht continues far enough to enumerate LAPICs
1195          */
1196         if (acpi_disabled && !acpi_ht)
1197                 return 1;
1198
1199         /* 
1200          * Initialize the ACPI boot-time table parser.
1201          */
1202         error = acpi_table_init();
1203         if (error) {
1204                 disable_acpi();
1205                 return error;
1206         }
1207
1208         acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
1209
1210         /*
1211          * blacklist may disable ACPI entirely
1212          */
1213         error = acpi_blacklisted();
1214         if (error) {
1215                 if (acpi_force) {
1216                         printk(KERN_WARNING PREFIX "acpi=force override\n");
1217                 } else {
1218                         printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
1219                         disable_acpi();
1220                         return error;
1221                 }
1222         }
1223
1224         return 0;
1225 }
1226
1227 int __init acpi_boot_init(void)
1228 {
1229         /*
1230          * If acpi_disabled, bail out
1231          * One exception: acpi=ht continues far enough to enumerate LAPICs
1232          */
1233         if (acpi_disabled && !acpi_ht)
1234                 return 1;
1235
1236         acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
1237
1238         /*
1239          * set sci_int and PM timer address
1240          */
1241         acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
1242
1243         /*
1244          * Process the Multiple APIC Description Table (MADT), if present
1245          */
1246         acpi_process_madt();
1247
1248         acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
1249
1250         return 0;
1251 }
1252
1253 static int __init parse_acpi(char *arg)
1254 {
1255         if (!arg)
1256                 return -EINVAL;
1257
1258         /* "acpi=off" disables both ACPI table parsing and interpreter */
1259         if (strcmp(arg, "off") == 0) {
1260                 disable_acpi();
1261         }
1262         /* acpi=force to over-ride black-list */
1263         else if (strcmp(arg, "force") == 0) {
1264                 acpi_force = 1;
1265                 acpi_ht = 1;
1266                 acpi_disabled = 0;
1267         }
1268         /* acpi=strict disables out-of-spec workarounds */
1269         else if (strcmp(arg, "strict") == 0) {
1270                 acpi_strict = 1;
1271         }
1272         /* Limit ACPI just to boot-time to enable HT */
1273         else if (strcmp(arg, "ht") == 0) {
1274                 if (!acpi_force)
1275                         disable_acpi();
1276                 acpi_ht = 1;
1277         }
1278         /* "acpi=noirq" disables ACPI interrupt routing */
1279         else if (strcmp(arg, "noirq") == 0) {
1280                 acpi_noirq_set();
1281         } else {
1282                 /* Core will printk when we return error. */
1283                 return -EINVAL;
1284         }
1285         return 0;
1286 }
1287 early_param("acpi", parse_acpi);
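/*
 * Illustrative kernel command lines handled above (not exhaustive):
 *
 *	acpi=off	disable ACPI table parsing and the interpreter
 *	acpi=force	override a blacklist/DMI decision to disable ACPI
 *	acpi=ht		limit ACPI to boot-time LAPIC enumeration (for HT)
 *	acpi=noirq	use ACPI, but not for interrupt routing
 */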
1288
1289 /* FIXME: Using pci= for an ACPI parameter is a travesty. */
1290 static int __init parse_pci(char *arg)
1291 {
1292         if (arg && strcmp(arg, "noacpi") == 0)
1293                 acpi_disable_pci();
1294         return 0;
1295 }
1296 early_param("pci", parse_pci);
1297
1298 #ifdef CONFIG_X86_IO_APIC
1299 static int __init parse_acpi_skip_timer_override(char *arg)
1300 {
1301         acpi_skip_timer_override = 1;
1302         return 0;
1303 }
1304 early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
1305 #endif /* CONFIG_X86_IO_APIC */
1306
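/*
 * acpi_sci=edge|level|high|low overrides the SCI trigger/polarity.  The
 * values stored here use the same MPS INTI encoding consumed by
 * acpi_sci_ioapic_setup(): 1 = edge / active-high, 3 = level / active-low.
 */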
1307 static int __init setup_acpi_sci(char *s)
1308 {
1309         if (!s)
1310                 return -EINVAL;
1311         if (!strcmp(s, "edge"))
1312                 acpi_sci_flags.trigger = 1;
1313         else if (!strcmp(s, "level"))
1314                 acpi_sci_flags.trigger = 3;
1315         else if (!strcmp(s, "high"))
1316                 acpi_sci_flags.polarity = 1;
1317         else if (!strcmp(s, "low"))
1318                 acpi_sci_flags.polarity = 3;
1319         else
1320                 return -EINVAL;
1321         return 0;
1322 }
1323 early_param("acpi_sci", setup_acpi_sci);