1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *
8  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9  *
10  *  This program is free software; you can redistribute it and/or modify
11  *  it under the terms of the GNU General Public License as published by
12  *  the Free Software Foundation; either version 2 of the License, or
13  *  (at your option) any later version.
14  *
15  *  This program is distributed in the hope that it will be useful,
16  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *  GNU General Public License for more details.
19  *
20  *  You should have received a copy of the GNU General Public License
21  *  along with this program; if not, write to the Free Software
22  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23  *
24  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25  *
26  */
27
28 #include <linux/module.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/mm.h>
32 #include <linux/pci.h>
33 #include <linux/interrupt.h>
34 #include <linux/kmod.h>
35 #include <linux/delay.h>
36 #include <linux/dmi.h>
37 #include <linux/workqueue.h>
38 #include <linux/nmi.h>
39 #include <linux/acpi.h>
40 #include <acpi/acpi.h>
41 #include <asm/io.h>
42 #include <acpi/acpi_bus.h>
43 #include <acpi/processor.h>
44 #include <asm/uaccess.h>
45
46 #include <linux/efi.h>
47 #include <linux/ioport.h>
48 #include <linux/list.h>
49
50 #define _COMPONENT              ACPI_OS_SERVICES
51 ACPI_MODULE_NAME("osl");
52 #define PREFIX          "ACPI: "
53 struct acpi_os_dpc {
54         acpi_osd_exec_callback function;
55         void *context;
56         struct work_struct work;
57 };
58
59 #ifdef CONFIG_ACPI_CUSTOM_DSDT
60 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
61 #endif
62
63 #ifdef ENABLE_DEBUGGER
64 #include <linux/kdb.h>
65
66 /* stuff for debugger support */
67 int acpi_in_debugger;
68 EXPORT_SYMBOL(acpi_in_debugger);
69
70 extern char line_buf[80];
71 #endif                          /*ENABLE_DEBUGGER */
72
73 static unsigned int acpi_irq_irq;
74 static acpi_osd_handler acpi_irq_handler;
75 static void *acpi_irq_context;
76 static struct workqueue_struct *kacpid_wq;
77 static struct workqueue_struct *kacpi_notify_wq;
78
79 struct acpi_res_list {
80         resource_size_t start;
81         resource_size_t end;
82         acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
 83         char name[5];   /* ACPI names are at most 4 chars; keep a copy here
 84                            instead of pointing at res->name, so no kmalloc is needed */
85         struct list_head resource_list;
86 };
87
88 static LIST_HEAD(resource_list_head);
89 static DEFINE_SPINLOCK(acpi_res_lock);
90
91 #define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
92 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
93
94 /*
95  * "Ode to _OSI(Linux)"
96  *
97  * osi_linux -- Control response to BIOS _OSI(Linux) query.
98  *
99  * As Linux evolves, the features that it supports change.
100  * So an OSI string such as "Linux" is not specific enough
101  * to be useful across multiple versions of Linux.  It
102  * doesn't identify any particular feature, interface,
103  * or even any particular version of Linux...
104  *
105  * Unfortunately, Linux-2.6.22 and earlier responded "yes"
106  * to a BIOS _OSI(Linux) query.  When
107  * a reference mobile BIOS started using it, its use
108  * started to spread to many vendor platforms.
109  * As it is not supportable, we need to halt that spread.
110  *
111  * Today, most BIOS references to _OSI(Linux) are noise --
112  * they have no functional effect and are just dead code
113  * carried over from the reference BIOS.
114  *
115  * The next most common case is that _OSI(Linux) harms Linux,
116  * usually by causing the BIOS to follow paths that are
117  * not tested during Windows validation.
118  *
119  * Finally, there is a short list of platforms
120  * where OSI(Linux) benefits Linux.
121  *
122  * In Linux-2.6.23, OSI(Linux) is first disabled by default.
123  * DMI is used to disable the dmesg warning about OSI(Linux)
124  * on platforms where it is known to have no effect.
125  * But a dmesg warning remains for systems where
126  * we do not know if OSI(Linux) is good or bad for the system.
127  * DMI is also used to enable OSI(Linux) for the machines
128  * that are known to need it.
129  *
130  * BIOS writers should NOT query _OSI(Linux) on future systems.
131  * It will be ignored by default, and to get Linux to
132  * not ignore it will require a kernel source update to
133  * add a DMI entry, or a boot-time "acpi_osi=Linux" invocation.
134  */
135 #define OSI_LINUX_ENABLE 0
136
137 static struct osi_linux {
138         unsigned int    enable:1;
139         unsigned int    dmi:1;
140         unsigned int    cmdline:1;
141         unsigned int    known:1;
142 } osi_linux = { OSI_LINUX_ENABLE, 0, 0, 0};
143
144 static void __init acpi_request_region (struct acpi_generic_address *addr,
145         unsigned int length, char *desc)
146 {
147         struct resource *res;
148
149         if (!addr->address || !length)
150                 return;
151
152         if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
153                 res = request_region(addr->address, length, desc);
154         else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
155                 res = request_mem_region(addr->address, length, desc);
156 }
157
158 static int __init acpi_reserve_resources(void)
159 {
160         acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
161                 "ACPI PM1a_EVT_BLK");
162
163         acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
164                 "ACPI PM1b_EVT_BLK");
165
166         acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
167                 "ACPI PM1a_CNT_BLK");
168
169         acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
170                 "ACPI PM1b_CNT_BLK");
171
172         if (acpi_gbl_FADT.pm_timer_length == 4)
173                 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
174
175         acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
176                 "ACPI PM2_CNT_BLK");
177
 178         /* Length of GPE blocks must be a multiple of 2, since each block is split into paired status and enable registers */
179
180         if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
181                 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
182                                acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
183
184         if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
185                 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
186                                acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
187
188         return 0;
189 }
190 device_initcall(acpi_reserve_resources);
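
/*
 * Illustrative note (not from this file): once the initcall above has run,
 * the FADT register blocks appear as named entries in /proc/ioports (or in
 * /proc/iomem for memory-mapped blocks).  On a typical PC this looks like:
 *
 *	0400-0403 : ACPI PM1a_EVT_BLK
 *	0404-0405 : ACPI PM1a_CNT_BLK
 *	0408-040b : ACPI PM_TMR
 *	0420-042f : ACPI GPE0_BLK
 *
 * The exact addresses come from the platform's FADT and will differ.
 */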
191
192 acpi_status __init acpi_os_initialize(void)
193 {
194         return AE_OK;
195 }
196
197 acpi_status acpi_os_initialize1(void)
198 {
199         /*
200          * Initialize PCI configuration space access, as we'll need to access
201          * it while walking the namespace (bus 0 and root bridges w/ _BBNs).
202          */
203         if (!raw_pci_ops) {
204                 printk(KERN_ERR PREFIX
205                        "Access to PCI configuration space unavailable\n");
206                 return AE_NULL_ENTRY;
207         }
208         kacpid_wq = create_singlethread_workqueue("kacpid");
209         kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
210         BUG_ON(!kacpid_wq);
211         BUG_ON(!kacpi_notify_wq);
212         return AE_OK;
213 }
214
215 acpi_status acpi_os_terminate(void)
216 {
217         if (acpi_irq_handler) {
218                 acpi_os_remove_interrupt_handler(acpi_irq_irq,
219                                                  acpi_irq_handler);
220         }
221
222         destroy_workqueue(kacpid_wq);
223         destroy_workqueue(kacpi_notify_wq);
224
225         return AE_OK;
226 }
227
228 void acpi_os_printf(const char *fmt, ...)
229 {
230         va_list args;
231         va_start(args, fmt);
232         acpi_os_vprintf(fmt, args);
233         va_end(args);
234 }
235
236 EXPORT_SYMBOL(acpi_os_printf);
237
238 void acpi_os_vprintf(const char *fmt, va_list args)
239 {
240         static char buffer[512];
241
 242         vsnprintf(buffer, sizeof(buffer), fmt, args);
243
244 #ifdef ENABLE_DEBUGGER
245         if (acpi_in_debugger) {
246                 kdb_printf("%s", buffer);
247         } else {
248                 printk("%s", buffer);
249         }
250 #else
251         printk("%s", buffer);
252 #endif
253 }
254
255 acpi_physical_address __init acpi_os_get_root_pointer(void)
256 {
257         if (efi_enabled) {
258                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
259                         return efi.acpi20;
260                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
261                         return efi.acpi;
262                 else {
263                         printk(KERN_ERR PREFIX
264                                "System description tables not found\n");
265                         return 0;
266                 }
267         } else {
268                 acpi_physical_address pa = 0;
269
270                 acpi_find_root_pointer(&pa);
271                 return pa;
272         }
273 }
274
275 void __iomem *__init_refok
276 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
277 {
278         if (phys > ULONG_MAX) {
279                 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
280                 return NULL;
281         }
282         if (acpi_gbl_permanent_mmap)
283                 /*
284                 * ioremap checks to ensure this is in reserved space
285                 */
286                 return ioremap((unsigned long)phys, size);
287         else
288                 return __acpi_map_table((unsigned long)phys, size);
289 }
290 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
291
292 void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
293 {
294         if (acpi_gbl_permanent_mmap) {
295                 iounmap(virt);
296         }
297 }
298 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
299
300 #ifdef ACPI_FUTURE_USAGE
301 acpi_status
302 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
303 {
304         if (!phys || !virt)
305                 return AE_BAD_PARAMETER;
306
307         *phys = virt_to_phys(virt);
308
309         return AE_OK;
310 }
311 #endif
312
313 #define ACPI_MAX_OVERRIDE_LEN 100
314
315 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
316
317 acpi_status
318 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
319                             acpi_string * new_val)
320 {
321         if (!init_val || !new_val)
322                 return AE_BAD_PARAMETER;
323
324         *new_val = NULL;
325         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
326                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
327                        acpi_os_name);
328                 *new_val = acpi_os_name;
329         }
330
331         return AE_OK;
332 }
333
334 acpi_status
335 acpi_os_table_override(struct acpi_table_header * existing_table,
336                        struct acpi_table_header ** new_table)
337 {
338         if (!existing_table || !new_table)
339                 return AE_BAD_PARAMETER;
340
341 #ifdef CONFIG_ACPI_CUSTOM_DSDT
342         if (strncmp(existing_table->signature, "DSDT", 4) == 0)
343                 *new_table = (struct acpi_table_header *)AmlCode;
344         else
345                 *new_table = NULL;
346 #else
347         *new_table = NULL;
348 #endif
349         return AE_OK;
350 }
351
352 static irqreturn_t acpi_irq(int irq, void *dev_id)
353 {
354         u32 handled;
355
356         handled = (*acpi_irq_handler) (acpi_irq_context);
357
358         if (handled) {
359                 acpi_irq_handled++;
360                 return IRQ_HANDLED;
361         } else
362                 return IRQ_NONE;
363 }
364
365 acpi_status
366 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
367                                   void *context)
368 {
369         unsigned int irq;
370
371         acpi_irq_stats_init();
372
373         /*
374          * Ignore the GSI from the core, and use the value in our copy of the
375          * FADT. It may not be the same if an interrupt source override exists
376          * for the SCI.
377          */
378         gsi = acpi_gbl_FADT.sci_interrupt;
379         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
380                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
381                        gsi);
382                 return AE_OK;
383         }
384
385         acpi_irq_handler = handler;
386         acpi_irq_context = context;
387         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
388                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
389                 return AE_NOT_ACQUIRED;
390         }
391         acpi_irq_irq = irq;
392
393         return AE_OK;
394 }
395
396 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
397 {
398         if (irq) {
399                 free_irq(irq, acpi_irq);
400                 acpi_irq_handler = NULL;
401                 acpi_irq_irq = 0;
402         }
403
404         return AE_OK;
405 }
406
407 /*
408  * Running in interpreter thread context, safe to sleep
409  */
410
411 void acpi_os_sleep(acpi_integer ms)
412 {
413         schedule_timeout_interruptible(msecs_to_jiffies(ms));
414 }
415
416 EXPORT_SYMBOL(acpi_os_sleep);
417
418 void acpi_os_stall(u32 us)
419 {
420         while (us) {
421                 u32 delay = 1000;
422
423                 if (delay > us)
424                         delay = us;
425                 udelay(delay);
426                 touch_nmi_watchdog();
427                 us -= delay;
428         }
429 }
430
431 EXPORT_SYMBOL(acpi_os_stall);
432
433 /*
434  * Support ACPI 3.0 AML Timer operand
435  * Returns 64-bit free-running, monotonically increasing timer
436  * with 100ns granularity
437  */
438 u64 acpi_os_get_timer(void)
439 {
440         static u64 t;
441
442 #ifdef  CONFIG_HPET
443         /* TBD: use HPET if available */
444 #endif
445
446 #ifdef  CONFIG_X86_PM_TIMER
447         /* TBD: default to PM timer if HPET was not available */
448 #endif
449         if (!t)
450                 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
451
452         return ++t;
453 }
454
455 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
456 {
457         u32 dummy;
458
459         if (!value)
460                 value = &dummy;
461
462         *value = 0;
463         if (width <= 8) {
464                 *(u8 *) value = inb(port);
465         } else if (width <= 16) {
466                 *(u16 *) value = inw(port);
467         } else if (width <= 32) {
468                 *(u32 *) value = inl(port);
469         } else {
470                 BUG();
471         }
472
473         return AE_OK;
474 }
475
476 EXPORT_SYMBOL(acpi_os_read_port);
477
478 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
479 {
480         if (width <= 8) {
481                 outb(value, port);
482         } else if (width <= 16) {
483                 outw(value, port);
484         } else if (width <= 32) {
485                 outl(value, port);
486         } else {
487                 BUG();
488         }
489
490         return AE_OK;
491 }
492
493 EXPORT_SYMBOL(acpi_os_write_port);
494
495 acpi_status
496 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
497 {
498         u32 dummy;
499         void __iomem *virt_addr;
500
501         virt_addr = ioremap(phys_addr, width);
502         if (!value)
503                 value = &dummy;
504
505         switch (width) {
506         case 8:
507                 *(u8 *) value = readb(virt_addr);
508                 break;
509         case 16:
510                 *(u16 *) value = readw(virt_addr);
511                 break;
512         case 32:
513                 *(u32 *) value = readl(virt_addr);
514                 break;
515         default:
516                 BUG();
517         }
518
519         iounmap(virt_addr);
520
521         return AE_OK;
522 }
523
524 acpi_status
525 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
526 {
527         void __iomem *virt_addr;
528
529         virt_addr = ioremap(phys_addr, width);
530
531         switch (width) {
532         case 8:
533                 writeb(value, virt_addr);
534                 break;
535         case 16:
536                 writew(value, virt_addr);
537                 break;
538         case 32:
539                 writel(value, virt_addr);
540                 break;
541         default:
542                 BUG();
543         }
544
545         iounmap(virt_addr);
546
547         return AE_OK;
548 }
549
550 acpi_status
551 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
552                                void *value, u32 width)
553 {
554         int result, size;
555
556         if (!value)
557                 return AE_BAD_PARAMETER;
558
559         switch (width) {
560         case 8:
561                 size = 1;
562                 break;
563         case 16:
564                 size = 2;
565                 break;
566         case 32:
567                 size = 4;
568                 break;
569         default:
570                 return AE_ERROR;
571         }
572
573         BUG_ON(!raw_pci_ops);
574
575         result = raw_pci_ops->read(pci_id->segment, pci_id->bus,
576                                    PCI_DEVFN(pci_id->device, pci_id->function),
577                                    reg, size, value);
578
579         return (result ? AE_ERROR : AE_OK);
580 }
581
582 EXPORT_SYMBOL(acpi_os_read_pci_configuration);
583
584 acpi_status
585 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
586                                 acpi_integer value, u32 width)
587 {
588         int result, size;
589
590         switch (width) {
591         case 8:
592                 size = 1;
593                 break;
594         case 16:
595                 size = 2;
596                 break;
597         case 32:
598                 size = 4;
599                 break;
600         default:
601                 return AE_ERROR;
602         }
603
604         BUG_ON(!raw_pci_ops);
605
606         result = raw_pci_ops->write(pci_id->segment, pci_id->bus,
607                                     PCI_DEVFN(pci_id->device, pci_id->function),
608                                     reg, size, value);
609
610         return (result ? AE_ERROR : AE_OK);
611 }
612
613 /* TODO: Change code to take advantage of driver model more */
614 static void acpi_os_derive_pci_id_2(acpi_handle rhandle,        /* upper bound  */
615                                     acpi_handle chandle,        /* current node */
616                                     struct acpi_pci_id **id,
617                                     int *is_bridge, u8 * bus_number)
618 {
619         acpi_handle handle;
620         struct acpi_pci_id *pci_id = *id;
621         acpi_status status;
622         unsigned long temp;
623         acpi_object_type type;
624         u8 tu8;
625
626         acpi_get_parent(chandle, &handle);
627         if (handle != rhandle) {
628                 acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
629                                         bus_number);
630
631                 status = acpi_get_type(handle, &type);
632                 if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
633                         return;
634
635                 status =
636                     acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
637                                           &temp);
638                 if (ACPI_SUCCESS(status)) {
639                         pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
640                         pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
641
642                         if (*is_bridge)
643                                 pci_id->bus = *bus_number;
644
645                         /* any nicer way to get bus number of bridge ? */
646                         status =
647                             acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8,
648                                                            8);
649                         if (ACPI_SUCCESS(status)
650                             && ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
651                                 status =
652                                     acpi_os_read_pci_configuration(pci_id, 0x18,
653                                                                    &tu8, 8);
654                                 if (!ACPI_SUCCESS(status)) {
655                                         /* Certainly broken...  FIX ME */
656                                         return;
657                                 }
658                                 *is_bridge = 1;
659                                 pci_id->bus = tu8;
660                                 status =
661                                     acpi_os_read_pci_configuration(pci_id, 0x19,
662                                                                    &tu8, 8);
663                                 if (ACPI_SUCCESS(status)) {
664                                         *bus_number = tu8;
665                                 }
666                         } else
667                                 *is_bridge = 0;
668                 }
669         }
670 }
671
672 void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound  */
673                            acpi_handle chandle, /* current node */
674                            struct acpi_pci_id **id)
675 {
676         int is_bridge = 1;
677         u8 bus_number = (*id)->bus;
678
679         acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
680 }
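
/*
 * Worked example for the helpers above (a sketch; the ACPI path is made up):
 * for a namespace device \_SB.PCI0.P0P1.ETH0, acpi_os_derive_pci_id() starts
 * at PCI0 and walks down.  At P0P1, _ADR supplies the device/function, and
 * config space offset 0x0e (header type) identifies it as a bridge (type 1
 * or 2); offsets 0x18/0x19 then give its primary and secondary bus numbers.
 * ETH0's final acpi_pci_id therefore ends up with the secondary bus of P0P1
 * plus the device/function taken from ETH0's own _ADR.
 */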
681
682 static void acpi_os_execute_deferred(struct work_struct *work)
683 {
684         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
685         if (!dpc) {
686                 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
687                 return;
688         }
689
690         dpc->function(dpc->context);
691         kfree(dpc);
692
693         return;
694 }
695
696 /*******************************************************************************
697  *
698  * FUNCTION:    acpi_os_execute
699  *
700  * PARAMETERS:  Type               - Type of the callback
701  *              Function           - Function to be executed
702  *              Context            - Function parameters
703  *
704  * RETURN:      Status
705  *
706  * DESCRIPTION: Depending on type, either queues function for deferred execution or
707  *              immediately executes function on a separate thread.
708  *
709  ******************************************************************************/
710
711 acpi_status acpi_os_execute(acpi_execute_type type,
712                             acpi_osd_exec_callback function, void *context)
713 {
714         acpi_status status = AE_OK;
715         struct acpi_os_dpc *dpc;
716         struct workqueue_struct *queue;
717         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
718                           "Scheduling function [%p(%p)] for deferred execution.\n",
719                           function, context));
720
721         if (!function)
722                 return AE_BAD_PARAMETER;
723
724         /*
725          * Allocate/initialize DPC structure.  Note that this memory will be
726          * freed by the callee.  The kernel handles the work_struct list  in a
727          * way that allows us to also free its memory inside the callee.
728          * Because we may want to schedule several tasks with different
729          * parameters we can't use the approach some kernel code uses of
730          * having a static work_struct.
731          */
732
733         dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
734         if (!dpc)
735                 return_ACPI_STATUS(AE_NO_MEMORY);
736
737         dpc->function = function;
738         dpc->context = context;
739
740         INIT_WORK(&dpc->work, acpi_os_execute_deferred);
741         queue = (type == OSL_NOTIFY_HANDLER) ? kacpi_notify_wq : kacpid_wq;
742         if (!queue_work(queue, &dpc->work)) {
743                 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
744                           "Call to queue_work() failed.\n"));
745                 status = AE_ERROR;
746                 kfree(dpc);
747         }
748         return_ACPI_STATUS(status);
749 }
750
751 EXPORT_SYMBOL(acpi_os_execute);
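
/*
 * Illustrative sketch (not part of this file): a caller inside ACPICA or a
 * driver would defer work to the kacpid/kacpi_notify workqueues like this.
 * The callback name and context below are hypothetical.
 *
 *	static void my_deferred_handler(void *context)
 *	{
 *		struct acpi_device *device = context;
 *		dev_info(&device->dev, "deferred ACPI work ran\n");
 *	}
 *
 *	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
 *				 my_deferred_handler, device);
 *	if (ACPI_FAILURE(status))
 *		return -ENOMEM;
 *
 * OSL_NOTIFY_HANDLER routes the work to kacpi_notify_wq; any other type goes
 * to kacpid_wq, as selected in acpi_os_execute() above.
 */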
752
753 void acpi_os_wait_events_complete(void *context)
754 {
755         flush_workqueue(kacpid_wq);
756 }
757
758 EXPORT_SYMBOL(acpi_os_wait_events_complete);
759
760 /*
 761  * Initialize the spinlock that the handle refers to (no memory is allocated here).
762  */
763 acpi_status acpi_os_create_lock(acpi_spinlock * handle)
764 {
765         spin_lock_init(*handle);
766
767         return AE_OK;
768 }
769
770 /*
 771  * Delete a spinlock.  Nothing to free, since acpi_os_create_lock() allocates no memory.
772  */
773 void acpi_os_delete_lock(acpi_spinlock handle)
774 {
775         return;
776 }
777
778 acpi_status
779 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
780 {
781         struct semaphore *sem = NULL;
782
783
784         sem = acpi_os_allocate(sizeof(struct semaphore));
785         if (!sem)
786                 return AE_NO_MEMORY;
787         memset(sem, 0, sizeof(struct semaphore));
788
789         sema_init(sem, initial_units);
790
791         *handle = (acpi_handle *) sem;
792
793         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
794                           *handle, initial_units));
795
796         return AE_OK;
797 }
798
799 EXPORT_SYMBOL(acpi_os_create_semaphore);
800
801 /*
802  * TODO: A better way to delete semaphores?  Linux doesn't have a
803  * 'delete_semaphore()' function -- may result in an invalid
804  * pointer dereference for non-synchronized consumers.  Should
805  * we at least check for blocked threads and signal/cancel them?
806  */
807
808 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
809 {
810         struct semaphore *sem = (struct semaphore *)handle;
811
812
813         if (!sem)
814                 return AE_BAD_PARAMETER;
815
816         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
817
818         kfree(sem);
819         sem = NULL;
820
821         return AE_OK;
822 }
823
824 EXPORT_SYMBOL(acpi_os_delete_semaphore);
825
826 /*
827  * TODO: The kernel doesn't have a 'down_timeout' function -- had to
828  * improvise.  The process is to sleep for one scheduler quantum
829  * until the semaphore becomes available.  Downside is that this
830  * may result in starvation for timeout-based waits when there's
831  * lots of semaphore activity.
832  *
833  * TODO: Support for units > 1?
834  */
835 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
836 {
837         acpi_status status = AE_OK;
838         struct semaphore *sem = (struct semaphore *)handle;
839         int ret = 0;
840
841
842         if (!sem || (units < 1))
843                 return AE_BAD_PARAMETER;
844
845         if (units > 1)
846                 return AE_SUPPORT;
847
848         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
849                           handle, units, timeout));
850
851         /*
852          * This can be called during resume with interrupts off.
853          * Like boot-time, we should be single threaded and will
854          * always get the lock if we try -- timeout or not.
855          * If this doesn't succeed, then we will oops courtesy of
856          * might_sleep() in down().
857          */
858         if (!down_trylock(sem))
859                 return AE_OK;
860
861         switch (timeout) {
862                 /*
863                  * No Wait:
864                  * --------
865                  * A zero timeout value indicates that we shouldn't wait - just
866                  * acquire the semaphore if available otherwise return AE_TIME
867                  * (a.k.a. 'would block').
868                  */
869         case 0:
870                 if (down_trylock(sem))
871                         status = AE_TIME;
872                 break;
873
874                 /*
875                  * Wait Indefinitely:
876                  * ------------------
877                  */
878         case ACPI_WAIT_FOREVER:
879                 down(sem);
880                 break;
881
882                 /*
883                  * Wait w/ Timeout:
884                  * ----------------
885                  */
886         default:
887                 // TODO: A better timeout algorithm?
888                 {
889                         int i = 0;
890                         static const int quantum_ms = 1000 / HZ;
891
892                         ret = down_trylock(sem);
893                         for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
894                                 schedule_timeout_interruptible(1);
895                                 ret = down_trylock(sem);
896                         }
897
898                         if (ret != 0)
899                                 status = AE_TIME;
900                 }
901                 break;
902         }
903
904         if (ACPI_FAILURE(status)) {
905                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
906                                   "Failed to acquire semaphore[%p|%d|%d], %s",
907                                   handle, units, timeout,
908                                   acpi_format_exception(status)));
909         } else {
910                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
911                                   "Acquired semaphore[%p|%d|%d]", handle,
912                                   units, timeout));
913         }
914
915         return status;
916 }
917
918 EXPORT_SYMBOL(acpi_os_wait_semaphore);
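
/*
 * Worked example of the polling loop above (assuming HZ=250, a common
 * config for this era): quantum_ms = 1000/HZ = 4, so a timeout of 100 ms
 * allows at most 100/4 = 25 down_trylock() attempts, each separated by a
 * one-jiffy interruptible sleep.  With HZ=1000 the quantum is 1 ms and the
 * loop polls up to 100 times before giving up with AE_TIME.
 */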
919
920 /*
921  * TODO: Support for units > 1?
922  */
923 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
924 {
925         struct semaphore *sem = (struct semaphore *)handle;
926
927
928         if (!sem || (units < 1))
929                 return AE_BAD_PARAMETER;
930
931         if (units > 1)
932                 return AE_SUPPORT;
933
934         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
935                           units));
936
937         up(sem);
938
939         return AE_OK;
940 }
941
942 EXPORT_SYMBOL(acpi_os_signal_semaphore);
943
944 #ifdef ACPI_FUTURE_USAGE
945 u32 acpi_os_get_line(char *buffer)
946 {
947
948 #ifdef ENABLE_DEBUGGER
949         if (acpi_in_debugger) {
950                 u32 chars;
951
952                 kdb_read(buffer, sizeof(line_buf));
953
954                 /* remove the CR kdb includes */
955                 chars = strlen(buffer) - 1;
956                 buffer[chars] = '\0';
957         }
958 #endif
959
960         return 0;
961 }
962 #endif                          /*  ACPI_FUTURE_USAGE  */
963
964 acpi_status acpi_os_signal(u32 function, void *info)
965 {
966         switch (function) {
967         case ACPI_SIGNAL_FATAL:
968                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
969                 break;
970         case ACPI_SIGNAL_BREAKPOINT:
971                 /*
972                  * AML Breakpoint
973                  * ACPI spec. says to treat it as a NOP unless
974                  * you are debugging.  So if/when we integrate
975                  * AML debugger into the kernel debugger its
976                  * hook will go here.  But until then it is
977                  * not useful to print anything on breakpoints.
978                  */
979                 break;
980         default:
981                 break;
982         }
983
984         return AE_OK;
985 }
986
987 EXPORT_SYMBOL(acpi_os_signal);
988
989 static int __init acpi_os_name_setup(char *str)
990 {
991         char *p = acpi_os_name;
992         int count = ACPI_MAX_OVERRIDE_LEN - 1;
993
994         if (!str || !*str)
995                 return 0;
996
997         for (; count-- && str && *str; str++) {
998                 if (isalnum(*str) || *str == ' ' || *str == ':')
999                         *p++ = *str;
1000                 else if (*str == '\'' || *str == '"')
1001                         continue;
1002                 else
1003                         break;
1004         }
1005         *p = 0;
1006
1007         return 1;
1008
1009 }
1010
1011 __setup("acpi_os_name=", acpi_os_name_setup);
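
/*
 * Example (boot command line, illustrative): some BIOSes misbehave unless
 * _OS reports a Windows string, e.g.
 *
 *	acpi_os_name="Microsoft Windows NT"
 *
 * The parser above keeps alphanumerics, spaces and ':', silently drops
 * quote characters, and stops at anything else (at most 99 characters).
 */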
1012
1013 static void __init set_osi_linux(unsigned int enable)
1014 {
1015         if (osi_linux.enable != enable) {
1016                 osi_linux.enable = enable;
1017                 printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
1018                         enable ? "Add": "Delet");
1019         }
1020         return;
1021 }
1022
1023 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1024 {
1025         osi_linux.cmdline = 1;  /* cmdline set the default */
1026         set_osi_linux(enable);
1027
1028         return;
1029 }
1030
1031 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1032 {
1033         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1034
1035         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1036
1037         if (enable == -1)
1038                 return;
1039
1040         osi_linux.known = 1;    /* DMI knows which OSI(Linux) default needed */
1041
1042         set_osi_linux(enable);
1043
1044         return;
1045 }
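
/*
 * Sketch of how a DMI quirk elsewhere (e.g. in a blacklist table) would call
 * the hook above; the table entry and callback shown here are hypothetical:
 *
 *	static int dmi_enable_osi_linux(const struct dmi_system_id *d)
 *	{
 *		acpi_dmi_osi_linux(1, d);
 *		return 0;
 *	}
 *
 *	{
 *		.callback = dmi_enable_osi_linux,
 *		.ident = "Example Laptop",
 *		.matches = {
 *			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
 *			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
 *		},
 *	},
 */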
1046
1047 /*
1048  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1049  *
1050  * empty string disables _OSI
1051  * string starting with '!' disables that string
1052  * otherwise string is added to list, augmenting built-in strings
1053  */
1054 static int __init acpi_osi_setup(char *str)
1055 {
1056         if (str == NULL || *str == '\0') {
1057                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1058                 acpi_gbl_create_osi_method = FALSE;
1059         } else if (!strcmp("!Linux", str)) {
1060                 acpi_cmdline_osi_linux(0);      /* !enable */
1061         } else if (*str == '!') {
1062                 if (acpi_osi_invalidate(++str) == AE_OK)
1063                         printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1064         } else if (!strcmp("Linux", str)) {
1065                 acpi_cmdline_osi_linux(1);      /* enable */
1066         } else if (*osi_additional_string == '\0') {
1067                 strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1068                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1069         }
1070
1071         return 1;
1072 }
1073
1074 __setup("acpi_osi=", acpi_osi_setup);
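
/*
 * Examples (boot command line) of strings accepted by acpi_osi_setup()
 * above; "FooOS" is a made-up name:
 *
 *	acpi_osi=			disable the _OSI method entirely
 *	acpi_osi=!Linux			answer "unsupported" to _OSI("Linux")
 *	acpi_osi=Linux			answer "supported" to _OSI("Linux")
 *	acpi_osi="!Windows 2006"	remove "Windows 2006" (Vista) from the
 *					interfaces ACPICA claims to support
 *	acpi_osi=FooOS			add "FooOS" as one extra interface string
 *
 * Only a single extra string can be added this way, since it lives in the
 * fixed osi_additional_string buffer.
 */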
1075
1076 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1077 static int __init acpi_serialize_setup(char *str)
1078 {
1079         printk(KERN_INFO PREFIX "serialize enabled\n");
1080
1081         acpi_gbl_all_methods_serialized = TRUE;
1082
1083         return 1;
1084 }
1085
1086 __setup("acpi_serialize", acpi_serialize_setup);
1087
1088 /*
1089  * Wake and Run-Time GPES are expected to be separate.
1090  * We disable wake-GPEs at run-time to prevent spurious
1091  * interrupts.
1092  *
1093  * However, if a system exists that shares Wake and
1094  * Run-time events on the same GPE this flag is available
1095  * to tell Linux to keep the wake-time GPEs enabled at run-time.
1096  */
1097 static int __init acpi_wake_gpes_always_on_setup(char *str)
1098 {
1099         printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
1100
1101         acpi_gbl_leave_wake_gpes_disabled = FALSE;
1102
1103         return 1;
1104 }
1105
1106 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1107
 1108 /* Check for resource interference between native drivers and ACPI
 1109  * OperationRegions (SystemIO and SystemMemory only).
 1110  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 1111  * in arbitrary AML code and can interfere with legacy drivers.
 1112  * acpi_enforce_resources= can be set to:
 1113  *
 1114  *   - strict           (2)
 1115  *     -> a driver trying to access the resources will not load
 1116  *
 1117  *   - lax (default)    (1)
 1118  *     -> a driver trying to access the resources will load, but you
 1119  *        get a warning that something might go wrong
 1120  *
 1121  *   - no               (0)
 1122  *     -> ACPI OperationRegion resources will not be registered
 1123  */
1124 #define ENFORCE_RESOURCES_STRICT 2
1125 #define ENFORCE_RESOURCES_LAX    1
1126 #define ENFORCE_RESOURCES_NO     0
1127
1128 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1129
1130 static int __init acpi_enforce_resources_setup(char *str)
1131 {
1132         if (str == NULL || *str == '\0')
1133                 return 0;
1134
1135         if (!strcmp("strict", str))
1136                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1137         else if (!strcmp("lax", str))
1138                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1139         else if (!strcmp("no", str))
1140                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1141
1142         return 1;
1143 }
1144
1145 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
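
/*
 * Example (boot command line): booting with acpi_enforce_resources=strict
 * makes acpi_check_resource_conflict() below return -EBUSY on a clash, so a
 * native driver (a hwmon chip driver, say) that pokes the same I/O ports as
 * an ACPI OperationRegion fails to bind instead of racing the AML code for
 * the hardware.
 */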
1146
1147 /* Check for resource conflicts between ACPI OperationRegions and native
1148  * drivers */
1149 int acpi_check_resource_conflict(struct resource *res)
1150 {
1151         struct acpi_res_list *res_list_elem;
1152         int ioport;
1153         int clash = 0;
1154
1155         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1156                 return 0;
1157         if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1158                 return 0;
1159
1160         ioport = res->flags & IORESOURCE_IO;
1161
1162         spin_lock(&acpi_res_lock);
1163         list_for_each_entry(res_list_elem, &resource_list_head,
1164                             resource_list) {
1165                 if (ioport && (res_list_elem->resource_type
1166                                != ACPI_ADR_SPACE_SYSTEM_IO))
1167                         continue;
1168                 if (!ioport && (res_list_elem->resource_type
1169                                 != ACPI_ADR_SPACE_SYSTEM_MEMORY))
1170                         continue;
1171
1172                 if (res->end < res_list_elem->start
1173                     || res_list_elem->end < res->start)
1174                         continue;
1175                 clash = 1;
1176                 break;
1177         }
1178         spin_unlock(&acpi_res_lock);
1179
1180         if (clash) {
1181                 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
 1182                         printk("%sACPI: %s resource %s [0x%llx-0x%llx]"
1183                                " conflicts with ACPI region %s"
1184                                " [0x%llx-0x%llx]\n",
1185                                acpi_enforce_resources == ENFORCE_RESOURCES_LAX
1186                                ? KERN_WARNING : KERN_ERR,
1187                                ioport ? "I/O" : "Memory", res->name,
1188                                (long long) res->start, (long long) res->end,
1189                                res_list_elem->name,
1190                                (long long) res_list_elem->start,
1191                                (long long) res_list_elem->end);
1192                         printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
1193                 }
1194                 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1195                         return -EBUSY;
1196         }
1197         return 0;
1198 }
1199 EXPORT_SYMBOL(acpi_check_resource_conflict);
1200
1201 int acpi_check_region(resource_size_t start, resource_size_t n,
1202                       const char *name)
1203 {
1204         struct resource res = {
1205                 .start = start,
1206                 .end   = start + n - 1,
1207                 .name  = name,
1208                 .flags = IORESOURCE_IO,
1209         };
1210
1211         return acpi_check_resource_conflict(&res);
1212 }
1213 EXPORT_SYMBOL(acpi_check_region);
1214
1215 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1216                       const char *name)
1217 {
1218         struct resource res = {
1219                 .start = start,
1220                 .end   = start + n - 1,
1221                 .name  = name,
1222                 .flags = IORESOURCE_MEM,
1223         };
1224
1225         return acpi_check_resource_conflict(&res);
1226
1227 }
1228 EXPORT_SYMBOL(acpi_check_mem_region);
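
/*
 * Usage sketch (hypothetical driver code, not part of this file): a native
 * driver is expected to call these helpers before claiming a region it may
 * share with ACPI OperationRegions:
 *
 *	if (acpi_check_region(io_base, io_len, "mydrv"))
 *		return -EBUSY;
 *	if (!request_region(io_base, io_len, "mydrv"))
 *		return -EBUSY;
 *
 * In lax mode the first call merely logs the conflict and returns 0; only
 * strict mode makes it fail with -EBUSY.
 */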
1229
1230 /*
1231  * Acquire a spinlock.
1232  *
1233  * handle is a pointer to the spinlock_t.
1234  */
1235
1236 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1237 {
1238         acpi_cpu_flags flags;
1239         spin_lock_irqsave(lockp, flags);
1240         return flags;
1241 }
1242
1243 /*
1244  * Release a spinlock. See above.
1245  */
1246
1247 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1248 {
1249         spin_unlock_irqrestore(lockp, flags);
1250 }
1251
1252 #ifndef ACPI_USE_LOCAL_CACHE
1253
1254 /*******************************************************************************
1255  *
1256  * FUNCTION:    acpi_os_create_cache
1257  *
1258  * PARAMETERS:  name      - Ascii name for the cache
1259  *              size      - Size of each cached object
1260  *              depth     - Maximum depth of the cache (in objects) <ignored>
1261  *              cache     - Where the new cache object is returned
1262  *
1263  * RETURN:      status
1264  *
1265  * DESCRIPTION: Create a cache object
1266  *
1267  ******************************************************************************/
1268
1269 acpi_status
1270 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1271 {
1272         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1273         if (*cache == NULL)
1274                 return AE_ERROR;
1275         else
1276                 return AE_OK;
1277 }
1278
1279 /*******************************************************************************
1280  *
1281  * FUNCTION:    acpi_os_purge_cache
1282  *
1283  * PARAMETERS:  Cache           - Handle to cache object
1284  *
1285  * RETURN:      Status
1286  *
1287  * DESCRIPTION: Free all objects within the requested cache.
1288  *
1289  ******************************************************************************/
1290
1291 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1292 {
1293         kmem_cache_shrink(cache);
1294         return (AE_OK);
1295 }
1296
1297 /*******************************************************************************
1298  *
1299  * FUNCTION:    acpi_os_delete_cache
1300  *
1301  * PARAMETERS:  Cache           - Handle to cache object
1302  *
1303  * RETURN:      Status
1304  *
1305  * DESCRIPTION: Free all objects within the requested cache and delete the
1306  *              cache object.
1307  *
1308  ******************************************************************************/
1309
1310 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1311 {
1312         kmem_cache_destroy(cache);
1313         return (AE_OK);
1314 }
1315
1316 /*******************************************************************************
1317  *
1318  * FUNCTION:    acpi_os_release_object
1319  *
1320  * PARAMETERS:  Cache       - Handle to cache object
1321  *              Object      - The object to be released
1322  *
1323  * RETURN:      None
1324  *
1325  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1326  *              the object is deleted.
1327  *
1328  ******************************************************************************/
1329
1330 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1331 {
1332         kmem_cache_free(cache, object);
1333         return (AE_OK);
1334 }
1335
1336 /**
1337  *      acpi_dmi_dump - dump DMI slots needed for blacklist entry
1338  *
1339  *      Returns 0 on success
1340  */
1341 static int acpi_dmi_dump(void)
1342 {
1343
1344         if (!dmi_available)
1345                 return -1;
1346
1347         printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
1348                 dmi_get_system_info(DMI_SYS_VENDOR));
1349         printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
1350                 dmi_get_system_info(DMI_PRODUCT_NAME));
1351         printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
1352                 dmi_get_system_info(DMI_PRODUCT_VERSION));
1353         printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
1354                 dmi_get_system_info(DMI_BOARD_NAME));
1355         printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
1356                 dmi_get_system_info(DMI_BIOS_VENDOR));
1357         printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
1358                 dmi_get_system_info(DMI_BIOS_DATE));
1359
1360         return 0;
1361 }
1362
1363
1364 /******************************************************************************
1365  *
1366  * FUNCTION:    acpi_os_validate_interface
1367  *
1368  * PARAMETERS:  interface           - Requested interface to be validated
1369  *
1370  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1371  *
1372  * DESCRIPTION: Match an interface string to the interfaces supported by the
1373  *              host. Strings originate from an AML call to the _OSI method.
1374  *
1375  *****************************************************************************/
1376
1377 acpi_status
 1378 acpi_os_validate_interface(char *interface)
1379 {
1380         if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1381                 return AE_OK;
1382         if (!strcmp("Linux", interface)) {
1383
1384                 printk(KERN_NOTICE PREFIX
1385                         "BIOS _OSI(Linux) query %s%s\n",
1386                         osi_linux.enable ? "honored" : "ignored",
1387                         osi_linux.cmdline ? " via cmdline" :
1388                         osi_linux.dmi ? " via DMI" : "");
1389
1390                 if (!osi_linux.dmi) {
1391                         if (acpi_dmi_dump())
1392                                 printk(KERN_NOTICE PREFIX
1393                                         "[please extract dmidecode output]\n");
1394                         printk(KERN_NOTICE PREFIX
1395                                 "Please send DMI info above to "
1396                                 "linux-acpi@vger.kernel.org\n");
1397                 }
1398                 if (!osi_linux.known && !osi_linux.cmdline) {
1399                         printk(KERN_NOTICE PREFIX
1400                                 "If \"acpi_osi=%sLinux\" works better, "
1401                                 "please notify linux-acpi@vger.kernel.org\n",
1402                                 osi_linux.enable ? "!" : "");
1403                 }
1404
1405                 if (osi_linux.enable)
1406                         return AE_OK;
1407         }
1408         return AE_SUPPORT;
1409 }
1410
1411 /******************************************************************************
1412  *
1413  * FUNCTION:    acpi_os_validate_address
1414  *
1415  * PARAMETERS:  space_id             - ACPI space ID
1416  *              address             - Physical address
1417  *              length              - Address length
1418  *
1419  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1420  *              should return AE_AML_ILLEGAL_ADDRESS.
1421  *
1422  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1423  *              the addresses accessed by AML operation regions.
1424  *
1425  *****************************************************************************/
1426
 1427 acpi_status
 1428 acpi_os_validate_address(
 1429     u8 space_id,
 1430     acpi_physical_address address,
 1431     acpi_size length,
 1432     char *name)
1433 {
1434         struct acpi_res_list *res;
1435         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1436                 return AE_OK;
1437
1438         switch (space_id) {
1439         case ACPI_ADR_SPACE_SYSTEM_IO:
1440         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
 1441                 /* Only interference checks against SystemIO and
 1442                    SystemMemory are needed */
1443                 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1444                 if (!res)
1445                         return AE_OK;
 1446                 /* ACPI names are fixed at 4 bytes; strlcpy guarantees NUL termination */
1447                 strlcpy(res->name, name, 5);
1448                 res->start = address;
1449                 res->end = address + length - 1;
1450                 res->resource_type = space_id;
1451                 spin_lock(&acpi_res_lock);
1452                 list_add(&res->resource_list, &resource_list_head);
1453                 spin_unlock(&acpi_res_lock);
1454                 pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, "
1455                          "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1456                          ? "SystemIO" : "System Memory",
1457                          (unsigned long long)res->start,
1458                          (unsigned long long)res->end,
1459                          res->name);
1460                 break;
1461         case ACPI_ADR_SPACE_PCI_CONFIG:
1462         case ACPI_ADR_SPACE_EC:
1463         case ACPI_ADR_SPACE_SMBUS:
1464         case ACPI_ADR_SPACE_CMOS:
1465         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1466         case ACPI_ADR_SPACE_DATA_TABLE:
1467         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1468                 break;
1469         }
1470         return AE_OK;
1471 }
1472
1473 #endif