/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <acpi/acpi.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/uaccess.h>

#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>

#define _COMPONENT              ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
#define PREFIX          "ACPI: "
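
/*
 * Deferred procedure call descriptor: wraps an ACPICA callback and its
 * context in a work_struct so it can be run later from a workqueue
 * (see acpi_os_execute() and acpi_os_execute_deferred() below).
 */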
struct acpi_os_dpc {
        acpi_osd_exec_callback function;
        void *context;
        struct work_struct work;
};

#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);

extern char line_buf[80];
#endif                          /* ENABLE_DEBUGGER */

static unsigned int acpi_irq_irq;
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;

struct acpi_res_list {
        resource_size_t start;
        resource_size_t end;
        acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
        char name[5];   /* ACPI names are at most 4 chars long; keep them here
                           instead of in res->name so no kmalloc is needed */
        struct list_head resource_list;
};

static LIST_HEAD(resource_list_head);
static DEFINE_SPINLOCK(acpi_res_lock);

#define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
static char osi_additional_string[OSI_STRING_LENGTH_MAX];

/*
 * "Ode to _OSI(Linux)"
 *
 * osi_linux -- Control response to BIOS _OSI(Linux) query.
 *
 * As Linux evolves, the features that it supports change.
 * So an OSI string such as "Linux" is not specific enough
 * to be useful across multiple versions of Linux.  It
 * doesn't identify any particular feature, interface,
 * or even any particular version of Linux...
 *
 * Unfortunately, Linux-2.6.22 and earlier responded "yes"
 * to a BIOS _OSI(Linux) query.  When
 * a reference mobile BIOS started using it, its use
 * started to spread to many vendor platforms.
 * As it is not supportable, we need to halt that spread.
 *
 * Today, most BIOS references to _OSI(Linux) are noise --
 * they have no functional effect and are just dead code
 * carried over from the reference BIOS.
 *
 * The next most common case is that _OSI(Linux) harms Linux,
 * usually by causing the BIOS to follow paths that are
 * not tested during Windows validation.
 *
 * Finally, there is a short list of platforms
 * where OSI(Linux) benefits Linux.
 *
 * In Linux-2.6.23, OSI(Linux) is first disabled by default.
 * DMI is used to disable the dmesg warning about OSI(Linux)
 * on platforms where it is known to have no effect.
 * But a dmesg warning remains for systems where
 * we do not know if OSI(Linux) is good or bad for the system.
 * DMI is also used to enable OSI(Linux) for the machines
 * that are known to need it.
 *
 * BIOS writers should NOT query _OSI(Linux) on future systems.
 * It will be ignored by default, and to get Linux to
 * not ignore it will require a kernel source update to
 * add a DMI entry, or a boot-time "acpi_osi=Linux" invocation.
 */
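/*
 * Boot-parameter examples (parsed by acpi_osi_setup() below):
 *   "acpi_osi=Linux"  -- honor the BIOS _OSI(Linux) query
 *   "acpi_osi=!Linux" -- ignore it
 */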
#define OSI_LINUX_ENABLE 0

static struct osi_linux {
        unsigned int    enable:1;
        unsigned int    dmi:1;
        unsigned int    cmdline:1;
        unsigned int    known:1;
} osi_linux = { OSI_LINUX_ENABLE, 0, 0, 0};

static void __init acpi_request_region(struct acpi_generic_address *addr,
        unsigned int length, char *desc)
{
        struct resource *res;

        if (!addr->address || !length)
                return;

        if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
                res = request_region(addr->address, length, desc);
        else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                res = request_mem_region(addr->address, length, desc);
}

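/*
 * Reserve the fixed hardware register blocks declared in the FADT
 * (PM1a/b event and control, PM timer, PM2 control, GPE0/1) so that they
 * show up in /proc/ioports or /proc/iomem and are not handed out to
 * other drivers.
 */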
static int __init acpi_reserve_resources(void)
{
        acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1a_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1b_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1a_CNT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1b_CNT_BLK");

        if (acpi_gbl_FADT.pm_timer_length == 4)
                acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

        acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
                "ACPI PM2_CNT_BLK");

        /* Length of GPE blocks must be a non-negative multiple of 2 */

        if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
                               acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

        if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
                               acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

        return 0;
}
device_initcall(acpi_reserve_resources);

acpi_status __init acpi_os_initialize(void)
{
        return AE_OK;
}

acpi_status acpi_os_initialize1(void)
{
        /*
         * Initialize PCI configuration space access, as we'll need to access
         * it while walking the namespace (bus 0 and root bridges w/ _BBNs).
         */
        if (!raw_pci_ops) {
                printk(KERN_ERR PREFIX
                       "Access to PCI configuration space unavailable\n");
                return AE_NULL_ENTRY;
        }
        kacpid_wq = create_singlethread_workqueue("kacpid");
        kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
        BUG_ON(!kacpid_wq);
        BUG_ON(!kacpi_notify_wq);
        return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
        if (acpi_irq_handler) {
                acpi_os_remove_interrupt_handler(acpi_irq_irq,
                                                 acpi_irq_handler);
        }

        destroy_workqueue(kacpid_wq);
        destroy_workqueue(kacpi_notify_wq);

        return AE_OK;
}

void acpi_os_printf(const char *fmt, ...)
{
        va_list args;
        va_start(args, fmt);
        acpi_os_vprintf(fmt, args);
        va_end(args);
}

void acpi_os_vprintf(const char *fmt, va_list args)
{
        static char buffer[512];

        vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                kdb_printf("%s", buffer);
        } else {
                printk("%s", buffer);
        }
#else
        printk("%s", buffer);
#endif
}

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
        if (efi_enabled) {
                if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi20;
                else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi;
                else {
                        printk(KERN_ERR PREFIX
                               "System description tables not found\n");
                        return 0;
                }
        } else {
                acpi_physical_address pa = 0;

                acpi_find_root_pointer(&pa);
                return pa;
        }
}

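/*
 * Before the VM is fully up (acpi_gbl_permanent_mmap still clear), table
 * mappings go through the early __acpi_map_table() path; afterwards
 * ioremap() is used, which also checks that the range is in reserved space.
 */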
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
        if (phys > ULONG_MAX) {
                printk(KERN_ERR PREFIX "Cannot map memory that high\n");
                return NULL;
        }
        if (acpi_gbl_permanent_mmap)
                /*
                 * ioremap checks to ensure this is in reserved space
                 */
                return ioremap((unsigned long)phys, size);
        else
                return __acpi_map_table((unsigned long)phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
{
        if (acpi_gbl_permanent_mmap) {
                iounmap(virt);
        }
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
        if (!phys || !virt)
                return AE_BAD_PARAMETER;

        *phys = virt_to_phys(virt);

        return AE_OK;
}
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
                            acpi_string * new_val)
{
        if (!init_val || !new_val)
                return AE_BAD_PARAMETER;

        *new_val = NULL;
        if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
                printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
                       acpi_os_name);
                *new_val = acpi_os_name;
        }

        return AE_OK;
}

acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
                       struct acpi_table_header ** new_table)
{
        if (!existing_table || !new_table)
                return AE_BAD_PARAMETER;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
        if (strncmp(existing_table->signature, "DSDT", 4) == 0)
                *new_table = (struct acpi_table_header *)AmlCode;
        else
                *new_table = NULL;
#else
        *new_table = NULL;
#endif
        return AE_OK;
}

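/*
 * SCI interrupt handler: dispatch to the handler ACPICA registered via
 * acpi_os_install_interrupt_handler() and count handled events.
 */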
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
        u32 handled;

        handled = (*acpi_irq_handler) (acpi_irq_context);

        if (handled) {
                acpi_irq_handled++;
                return IRQ_HANDLED;
        } else
                return IRQ_NONE;
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
                                  void *context)
{
        unsigned int irq;

        acpi_irq_stats_init();

        /*
         * Ignore the GSI from the core, and use the value in our copy of the
         * FADT. It may not be the same if an interrupt source override exists
         * for the SCI.
         */
        gsi = acpi_gbl_FADT.sci_interrupt;
        if (acpi_gsi_to_irq(gsi, &irq) < 0) {
                printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
                       gsi);
                return AE_OK;
        }

        acpi_irq_handler = handler;
        acpi_irq_context = context;
        if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
                printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
                return AE_NOT_ACQUIRED;
        }
        acpi_irq_irq = irq;

        return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
        if (irq) {
                free_irq(irq, acpi_irq);
                acpi_irq_handler = NULL;
                acpi_irq_irq = 0;
        }

        return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(acpi_integer ms)
{
        schedule_timeout_interruptible(msecs_to_jiffies(ms));
}

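/*
 * Busy-wait for the requested number of microseconds, in chunks of at most
 * 1 ms, poking the NMI watchdog between chunks so that long stalls do not
 * trigger a false lockup report.
 */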
void acpi_os_stall(u32 us)
{
        while (us) {
                u32 delay = 1000;

                if (delay > us)
                        delay = us;
                udelay(delay);
                touch_nmi_watchdog();
                us -= delay;
        }
}

/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
        static u64 t;

#ifdef  CONFIG_HPET
        /* TBD: use HPET if available */
#endif

#ifdef  CONFIG_X86_PM_TIMER
        /* TBD: default to PM timer if HPET was not available */
#endif
        if (!t)
                printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");

        return ++t;
}

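/*
 * Port and memory-mapped I/O for ACPICA.  The width argument is a bit
 * width; accesses of 8, 16 and 32 bits are supported and anything larger
 * (or, for the memory helpers, any other value) triggers BUG().
 */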
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
        u32 dummy;

        if (!value)
                value = &dummy;

        *value = 0;
        if (width <= 8) {
                *(u8 *) value = inb(port);
        } else if (width <= 16) {
                *(u16 *) value = inw(port);
        } else if (width <= 32) {
                *(u32 *) value = inl(port);
        } else {
                BUG();
        }

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
        if (width <= 8) {
                outb(value, port);
        } else if (width <= 16) {
                outw(value, port);
        } else if (width <= 32) {
                outl(value, port);
        } else {
                BUG();
        }

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
{
        u32 dummy;
        void __iomem *virt_addr;

        virt_addr = ioremap(phys_addr, width);
        if (!value)
                value = &dummy;

        switch (width) {
        case 8:
                *(u8 *) value = readb(virt_addr);
                break;
        case 16:
                *(u16 *) value = readw(virt_addr);
                break;
        case 32:
                *(u32 *) value = readl(virt_addr);
                break;
        default:
                BUG();
        }

        iounmap(virt_addr);

        return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
{
        void __iomem *virt_addr;

        virt_addr = ioremap(phys_addr, width);

        switch (width) {
        case 8:
                writeb(value, virt_addr);
                break;
        case 16:
                writew(value, virt_addr);
                break;
        case 32:
                writel(value, virt_addr);
                break;
        default:
                BUG();
        }

        iounmap(virt_addr);

        return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                               void *value, u32 width)
{
        int result, size;

        if (!value)
                return AE_BAD_PARAMETER;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        BUG_ON(!raw_pci_ops);

        result = raw_pci_ops->read(pci_id->segment, pci_id->bus,
                                   PCI_DEVFN(pci_id->device, pci_id->function),
                                   reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                                acpi_integer value, u32 width)
{
        int result, size;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        BUG_ON(!raw_pci_ops);

        result = raw_pci_ops->write(pci_id->segment, pci_id->bus,
                                    PCI_DEVFN(pci_id->device, pci_id->function),
                                    reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}

/* TODO: Change code to take advantage of driver model more */
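/*
 * Walk up the ACPI namespace from chandle towards rhandle, evaluating _ADR
 * on each device to accumulate the PCI device/function, and reading the
 * header-type and bus-number config registers of intermediate bridges to
 * work out the bus number that finally applies to *id.
 */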
static void acpi_os_derive_pci_id_2(acpi_handle rhandle,        /* upper bound  */
                                    acpi_handle chandle,        /* current node */
                                    struct acpi_pci_id **id,
                                    int *is_bridge, u8 * bus_number)
{
        acpi_handle handle;
        struct acpi_pci_id *pci_id = *id;
        acpi_status status;
        unsigned long temp;
        acpi_object_type type;
        u8 tu8;

        acpi_get_parent(chandle, &handle);
        if (handle != rhandle) {
                acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
                                        bus_number);

                status = acpi_get_type(handle, &type);
                if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
                        return;

                status =
                    acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
                                          &temp);
                if (ACPI_SUCCESS(status)) {
                        pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
                        pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));

                        if (*is_bridge)
                                pci_id->bus = *bus_number;

                        /* any nicer way to get bus number of bridge ? */
                        status =
                            acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8,
                                                           8);
                        if (ACPI_SUCCESS(status)
                            && ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
                                status =
                                    acpi_os_read_pci_configuration(pci_id, 0x18,
                                                                   &tu8, 8);
                                if (!ACPI_SUCCESS(status)) {
                                        /* Certainly broken...  FIX ME */
                                        return;
                                }
                                *is_bridge = 1;
                                pci_id->bus = tu8;
                                status =
                                    acpi_os_read_pci_configuration(pci_id, 0x19,
                                                                   &tu8, 8);
                                if (ACPI_SUCCESS(status)) {
                                        *bus_number = tu8;
                                }
                        } else
                                *is_bridge = 0;
                }
        }
}

void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound  */
                           acpi_handle chandle, /* current node */
                           struct acpi_pci_id **id)
{
        int is_bridge = 1;
        u8 bus_number = (*id)->bus;

        acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
        struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
        if (!dpc) {
                printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
                return;
        }

        dpc->function(dpc->context);
        kfree(dpc);

        return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
                            acpi_osd_exec_callback function, void *context)
{
        acpi_status status = AE_OK;
        struct acpi_os_dpc *dpc;
        struct workqueue_struct *queue;
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "Scheduling function [%p(%p)] for deferred execution.\n",
                          function, context));

        if (!function)
                return AE_BAD_PARAMETER;

        /*
         * Allocate/initialize DPC structure.  Note that this memory will be
         * freed by the callee.  The kernel handles the work_struct list  in a
         * way that allows us to also free its memory inside the callee.
         * Because we may want to schedule several tasks with different
         * parameters we can't use the approach some kernel code uses of
         * having a static work_struct.
         */

        dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
        if (!dpc)
                return_ACPI_STATUS(AE_NO_MEMORY);

        dpc->function = function;
        dpc->context = context;

        INIT_WORK(&dpc->work, acpi_os_execute_deferred);
        queue = (type == OSL_NOTIFY_HANDLER) ? kacpi_notify_wq : kacpid_wq;
        if (!queue_work(queue, &dpc->work)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                          "Call to queue_work() failed.\n"));
                status = AE_ERROR;
                kfree(dpc);
        }
        return_ACPI_STATUS(status);
}

EXPORT_SYMBOL(acpi_os_execute);

void acpi_os_wait_events_complete(void *context)
{
        flush_workqueue(kacpid_wq);
}

EXPORT_SYMBOL(acpi_os_wait_events_complete);

/*
 * Allocate the memory for a spinlock and initialize it.
 */
acpi_status acpi_os_create_lock(acpi_spinlock * handle)
{
        spin_lock_init(*handle);

        return AE_OK;
}

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
        return;
}

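/*
 * ACPICA semaphores are backed by ordinary kernel counting semaphores:
 * creation allocates and initializes one, deletion simply kfree()s it
 * (see the TODOs below about the limitations of this approach).
 */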
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
        struct semaphore *sem = NULL;


        sem = acpi_os_allocate(sizeof(struct semaphore));
        if (!sem)
                return AE_NO_MEMORY;
        memset(sem, 0, sizeof(struct semaphore));

        sema_init(sem, initial_units);

        *handle = (acpi_handle *) sem;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
                          *handle, initial_units));

        return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
        struct semaphore *sem = (struct semaphore *)handle;


        if (!sem)
                return AE_BAD_PARAMETER;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

        kfree(sem);
        sem = NULL;

        return AE_OK;
}

/*
 * TODO: The kernel doesn't have a 'down_timeout' function -- had to
 * improvise.  The process is to sleep for one scheduler quantum
 * until the semaphore becomes available.  Downside is that this
 * may result in starvation for timeout-based waits when there's
 * lots of semaphore activity.
 *
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
        acpi_status status = AE_OK;
        struct semaphore *sem = (struct semaphore *)handle;
        int ret = 0;


        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
                          handle, units, timeout));

        /*
         * This can be called during resume with interrupts off.
         * Like boot-time, we should be single threaded and will
         * always get the lock if we try -- timeout or not.
         * If this doesn't succeed, then we will oops courtesy of
         * might_sleep() in down().
         */
        if (!down_trylock(sem))
                return AE_OK;

        switch (timeout) {
                /*
                 * No Wait:
                 * --------
                 * A zero timeout value indicates that we shouldn't wait - just
                 * acquire the semaphore if available otherwise return AE_TIME
                 * (a.k.a. 'would block').
                 */
        case 0:
                if (down_trylock(sem))
                        status = AE_TIME;
                break;

                /*
                 * Wait Indefinitely:
                 * ------------------
                 */
        case ACPI_WAIT_FOREVER:
                down(sem);
                break;

                /*
                 * Wait w/ Timeout:
                 * ----------------
                 */
        default:
                // TODO: A better timeout algorithm?
                {
                        int i = 0;
                        static const int quantum_ms = 1000 / HZ;

                        ret = down_trylock(sem);
                        for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
                                schedule_timeout_interruptible(1);
                                ret = down_trylock(sem);
                        }

                        if (ret != 0)
                                status = AE_TIME;
                }
                break;
        }

        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Failed to acquire semaphore[%p|%d|%d], %s",
                                  handle, units, timeout,
                                  acpi_format_exception(status)));
        } else {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Acquired semaphore[%p|%d|%d]", handle,
                                  units, timeout));
        }

        return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
        struct semaphore *sem = (struct semaphore *)handle;


        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
                          units));

        up(sem);

        return AE_OK;
}

#ifdef ACPI_FUTURE_USAGE
u32 acpi_os_get_line(char *buffer)
{

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                u32 chars;

                kdb_read(buffer, sizeof(line_buf));

                /* remove the CR kdb includes */
                chars = strlen(buffer) - 1;
                buffer[chars] = '\0';
        }
#endif

        return 0;
}
#endif                          /*  ACPI_FUTURE_USAGE  */

acpi_status acpi_os_signal(u32 function, void *info)
{
        switch (function) {
        case ACPI_SIGNAL_FATAL:
                printk(KERN_ERR PREFIX "Fatal opcode executed\n");
                break;
        case ACPI_SIGNAL_BREAKPOINT:
                /*
                 * AML Breakpoint
                 * ACPI spec. says to treat it as a NOP unless
                 * you are debugging.  So if/when we integrate
                 * AML debugger into the kernel debugger its
                 * hook will go here.  But until then it is
                 * not useful to print anything on breakpoints.
                 */
                break;
        default:
                break;
        }

        return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
        char *p = acpi_os_name;
        int count = ACPI_MAX_OVERRIDE_LEN - 1;

        if (!str || !*str)
                return 0;

        for (; count-- && str && *str; str++) {
                if (isalnum(*str) || *str == ' ' || *str == ':')
                        *p++ = *str;
                else if (*str == '\'' || *str == '"')
                        continue;
                else
                        break;
        }
        *p = 0;

        return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);

static void __init set_osi_linux(unsigned int enable)
{
        if (osi_linux.enable != enable) {
                osi_linux.enable = enable;
                printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
                        enable ? "Add": "Delet");
        }
        return;
}

static void __init acpi_cmdline_osi_linux(unsigned int enable)
{
        osi_linux.cmdline = 1;  /* cmdline set the default */
        set_osi_linux(enable);

        return;
}

void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
{
        osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */

        printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);

        if (enable == -1)
                return;

        osi_linux.known = 1;    /* DMI knows which OSI(Linux) default needed */

        set_osi_linux(enable);

        return;
}

/*
 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
 *
 * empty string disables _OSI
 * string starting with '!' disables that string
 * otherwise string is added to list, augmenting built-in strings
 */
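/*
 * Examples ("MyOSI" is only an illustrative string):
 *   acpi_osi=         -- disable the _OSI method entirely
 *   acpi_osi=!Linux   -- refuse the _OSI(Linux) query
 *   acpi_osi=Linux    -- honor the _OSI(Linux) query
 *   acpi_osi=!MyOSI   -- remove "MyOSI" from the interface list
 *   acpi_osi=MyOSI    -- add "MyOSI" to the interface list
 */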
static int __init acpi_osi_setup(char *str)
{
        if (str == NULL || *str == '\0') {
                printk(KERN_INFO PREFIX "_OSI method disabled\n");
                acpi_gbl_create_osi_method = FALSE;
        } else if (!strcmp("!Linux", str)) {
                acpi_cmdline_osi_linux(0);      /* !enable */
        } else if (*str == '!') {
                if (acpi_osi_invalidate(++str) == AE_OK)
                        printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
        } else if (!strcmp("Linux", str)) {
                acpi_cmdline_osi_linux(1);      /* enable */
        } else if (*osi_additional_string == '\0') {
                strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
                printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
        }

        return 1;
}

__setup("acpi_osi=", acpi_osi_setup);

/* enable serialization to combat AE_ALREADY_EXISTS errors */
static int __init acpi_serialize_setup(char *str)
{
        printk(KERN_INFO PREFIX "serialize enabled\n");

        acpi_gbl_all_methods_serialized = TRUE;

        return 1;
}

__setup("acpi_serialize", acpi_serialize_setup);

/*
 * Wake and Run-Time GPES are expected to be separate.
 * We disable wake-GPEs at run-time to prevent spurious
 * interrupts.
 *
 * However, if a system exists that shares Wake and
 * Run-time events on the same GPE this flag is available
 * to tell Linux to keep the wake-time GPEs enabled at run-time.
 */
static int __init acpi_wake_gpes_always_on_setup(char *str)
{
        printk(KERN_INFO PREFIX "wake GPEs not disabled\n");

        acpi_gbl_leave_wake_gpes_disabled = FALSE;

        return 1;
}

__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);

/*
 * Check for resource conflicts between native drivers and ACPI
 * OperationRegions (SystemIO and SystemMemory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict        (2)
 *     -> a driver that tries to access the resources will not be loaded
 *   - lax (default) (1)
 *     -> a driver that tries to access the resources will still load, but
 *        a kernel message warns that something might go wrong
 *   - no            (0)
 *     -> ACPI OperationRegion resources will not be registered at all
 */
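/* Example: booting with "acpi_enforce_resources=strict" makes
 * acpi_check_resource_conflict() return -EBUSY for conflicting requests
 * instead of merely warning. */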
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_LAX;

static int __init acpi_enforce_resources_setup(char *str)
{
        if (str == NULL || *str == '\0')
                return 0;

        if (!strcmp("strict", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
        else if (!strcmp("lax", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
        else if (!strcmp("no", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_NO;

        return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(struct resource *res)
{
        struct acpi_res_list *res_list_elem;
        int ioport;
        int clash = 0;

        if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
                return 0;
        if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
                return 0;

        ioport = res->flags & IORESOURCE_IO;

        spin_lock(&acpi_res_lock);
        list_for_each_entry(res_list_elem, &resource_list_head,
                            resource_list) {
                if (ioport && (res_list_elem->resource_type
                               != ACPI_ADR_SPACE_SYSTEM_IO))
                        continue;
                if (!ioport && (res_list_elem->resource_type
                                != ACPI_ADR_SPACE_SYSTEM_MEMORY))
                        continue;

                if (res->end < res_list_elem->start
                    || res_list_elem->end < res->start)
                        continue;
                clash = 1;
                break;
        }
        spin_unlock(&acpi_res_lock);

        if (clash) {
                if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
                        printk("%sACPI: %s resource %s [0x%llx-0x%llx]"
                               " conflicts with ACPI region %s"
                               " [0x%llx-0x%llx]\n",
                               acpi_enforce_resources == ENFORCE_RESOURCES_LAX
                               ? KERN_WARNING : KERN_ERR,
                               ioport ? "I/O" : "Memory", res->name,
                               (long long) res->start, (long long) res->end,
                               res_list_elem->name,
                               (long long) res_list_elem->start,
                               (long long) res_list_elem->end);
                        printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
                }
                if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
                        return -EBUSY;
        }
        return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

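/*
 * Convenience wrappers: build a struct resource describing an I/O port or
 * memory range and run it through acpi_check_resource_conflict().
 */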
int acpi_check_region(resource_size_t start, resource_size_t n,
                      const char *name)
{
        struct resource res = {
                .start = start,
                .end   = start + n - 1,
                .name  = name,
                .flags = IORESOURCE_IO,
        };

        return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);

int acpi_check_mem_region(resource_size_t start, resource_size_t n,
                      const char *name)
{
        struct resource res = {
                .start = start,
                .end   = start + n - 1,
                .name  = name,
                .flags = IORESOURCE_MEM,
        };

        return acpi_check_resource_conflict(&res);

}
EXPORT_SYMBOL(acpi_check_mem_region);

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
        acpi_cpu_flags flags;
        spin_lock_irqsave(lockp, flags);
        return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
        spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
        *cache = kmem_cache_create(name, size, 0, 0, NULL);
        if (*cache == NULL)
                return AE_ERROR;
        else
                return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
        kmem_cache_shrink(cache);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
        kmem_cache_destroy(cache);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
        kmem_cache_free(cache, object);
        return (AE_OK);
}

/**
 *      acpi_dmi_dump - dump DMI slots needed for blacklist entry
 *
 *      Returns 0 on success
 */
static int acpi_dmi_dump(void)
{

        if (!dmi_available)
                return -1;

        printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
                dmi_get_system_info(DMI_SYS_VENDOR));
        printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
                dmi_get_system_info(DMI_PRODUCT_NAME));
        printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
                dmi_get_system_info(DMI_PRODUCT_VERSION));
        printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
                dmi_get_system_info(DMI_BOARD_NAME));
        printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
                dmi_get_system_info(DMI_BIOS_VENDOR));
        printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
                dmi_get_system_info(DMI_BIOS_DATE));

        return 0;
}


/******************************************************************************
 *
 * FUNCTION:    acpi_os_validate_interface
 *
 * PARAMETERS:  interface           - Requested interface to be validated
 *
 * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
 *
 * DESCRIPTION: Match an interface string to the interfaces supported by the
 *              host. Strings originate from an AML call to the _OSI method.
 *
 *****************************************************************************/

acpi_status
acpi_os_validate_interface(char *interface)
{
        if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
                return AE_OK;
        if (!strcmp("Linux", interface)) {

                printk(KERN_NOTICE PREFIX
                        "BIOS _OSI(Linux) query %s%s\n",
                        osi_linux.enable ? "honored" : "ignored",
                        osi_linux.cmdline ? " via cmdline" :
                        osi_linux.dmi ? " via DMI" : "");

                if (!osi_linux.dmi) {
                        if (acpi_dmi_dump())
                                printk(KERN_NOTICE PREFIX
                                        "[please extract dmidecode output]\n");
                        printk(KERN_NOTICE PREFIX
                                "Please send DMI info above to "
                                "linux-acpi@vger.kernel.org\n");
                }
                if (!osi_linux.known && !osi_linux.cmdline) {
                        printk(KERN_NOTICE PREFIX
                                "If \"acpi_osi=%sLinux\" works better, "
                                "please notify linux-acpi@vger.kernel.org\n",
                                osi_linux.enable ? "!" : "");
                }

                if (osi_linux.enable)
                        return AE_OK;
        }
        return AE_SUPPORT;
}

/******************************************************************************
 *
 * FUNCTION:    acpi_os_validate_address
 *
 * PARAMETERS:  space_id            - ACPI space ID
 *              address             - Physical address
 *              length              - Address length
 *
 * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
 *              should return AE_AML_ILLEGAL_ADDRESS.
 *
 * DESCRIPTION: Validate a system address via the host OS. Used to validate
 *              the addresses accessed by AML operation regions.
 *
 *****************************************************************************/

acpi_status
acpi_os_validate_address(u8 space_id, acpi_physical_address address,
                         acpi_size length, char *name)
{
        struct acpi_res_list *res;

        if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
                return AE_OK;

        switch (space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
        case ACPI_ADR_SPACE_SYSTEM_MEMORY:
                /* Only interference checks against SystemIO and SystemMemory
                   are needed */
                res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
                if (!res)
                        return AE_OK;
                /* ACPI names are fixed to 4 bytes, still better use strlcpy */
                strlcpy(res->name, name, 5);
                res->start = address;
                res->end = address + length - 1;
                res->resource_type = space_id;
                spin_lock(&acpi_res_lock);
                list_add(&res->resource_list, &resource_list_head);
                spin_unlock(&acpi_res_lock);
                pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, "
                         "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
                         ? "SystemIO" : "System Memory",
                         (unsigned long long)res->start,
                         (unsigned long long)res->end,
                         res->name);
                break;
        case ACPI_ADR_SPACE_PCI_CONFIG:
        case ACPI_ADR_SPACE_EC:
        case ACPI_ADR_SPACE_SMBUS:
        case ACPI_ADR_SPACE_CMOS:
        case ACPI_ADR_SPACE_PCI_BAR_TARGET:
        case ACPI_ADR_SPACE_DATA_TABLE:
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                break;
        }
        return AE_OK;
}

#endif                          /* ACPI_USE_LOCAL_CACHE */