ACPI: simplify deferred execution path
drivers/acpi/osl.c
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; if not, write to the Free Software
24  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *
28  */
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/mm.h>
34 #include <linux/pci.h>
35 #include <linux/interrupt.h>
36 #include <linux/kmod.h>
37 #include <linux/delay.h>
38 #include <linux/workqueue.h>
39 #include <linux/nmi.h>
40 #include <linux/acpi.h>
41 #include <linux/efi.h>
42 #include <linux/ioport.h>
43 #include <linux/list.h>
44 #include <linux/jiffies.h>
45 #include <linux/semaphore.h>
46
47 #include <asm/io.h>
48 #include <asm/uaccess.h>
49
50 #include <acpi/acpi.h>
51 #include <acpi/acpi_bus.h>
52 #include <acpi/processor.h>
53
54 #define _COMPONENT              ACPI_OS_SERVICES
55 ACPI_MODULE_NAME("osl");
56 #define PREFIX          "ACPI: "
57 struct acpi_os_dpc {
58         acpi_osd_exec_callback function;
59         void *context;
60         struct work_struct work;
61         int wait;
62 };
63
64 #ifdef CONFIG_ACPI_CUSTOM_DSDT
65 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
66 #endif
67
68 #ifdef ENABLE_DEBUGGER
69 #include <linux/kdb.h>
70
71 /* stuff for debugger support */
72 int acpi_in_debugger;
73 EXPORT_SYMBOL(acpi_in_debugger);
74
75 extern char line_buf[80];
76 #endif                          /*ENABLE_DEBUGGER */
77
78 static unsigned int acpi_irq_irq;
79 static acpi_osd_handler acpi_irq_handler;
80 static void *acpi_irq_context;
81 static struct workqueue_struct *kacpid_wq;
82 static struct workqueue_struct *kacpi_notify_wq;
83 static struct workqueue_struct *kacpi_hotplug_wq;
84
85 struct acpi_res_list {
86         resource_size_t start;
87         resource_size_t end;
88         acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
89         char name[5];   /* can only be 4 chars long; use this one instead of
90                            res->name so there is no need to kmalloc it */
91         struct list_head resource_list;
92         int count;
93 };
94
95 static LIST_HEAD(resource_list_head);
96 static DEFINE_SPINLOCK(acpi_res_lock);
97
98 #define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
99 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
100
101 /*
102  * The story of _OSI(Linux)
103  *
104  * From pre-history through Linux-2.6.22,
105  * Linux responded TRUE upon a BIOS OSI(Linux) query.
106  *
107  * Unfortunately, reference BIOS writers got wind of this
108  * and put OSI(Linux) in their example code, quickly exposing
109  * this string as ill-conceived and opening the door to
110  * an un-bounded number of BIOS incompatibilities.
111  *
112  * For example, OSI(Linux) was used on resume to re-POST a
113  * video card on one system, because Linux at that time
114  * could not do a speedy restore in its native driver.
115  * But then, even after gaining quick native restore capability,
116  * Linux had no way to tell the BIOS to skip the time-consuming
117  * POST -- putting Linux at a permanent performance disadvantage.
118  * On another system, the BIOS writer used OSI(Linux)
119  * to infer native OS support for IPMI!  On other systems,
120  * OSI(Linux) simply got in the way of Linux claiming to
121  * be compatible with other operating systems, exposing
122  * BIOS issues such as skipped device initialization.
123  *
124  * So "Linux" turned out to be a really poor choice of
125  * OSI string, and from Linux-2.6.23 onward we respond FALSE.
126  *
127  * BIOS writers should NOT query _OSI(Linux) on future systems.
128  * Linux will complain on the console when it sees it, and return FALSE.
129  * Getting Linux to return TRUE for your system will require
130  * a kernel source update to add a DMI entry,
131  * or booting with "acpi_osi=Linux".
132  */
133
134 static struct osi_linux {
135         unsigned int    enable:1;
136         unsigned int    dmi:1;
137         unsigned int    cmdline:1;
138         unsigned int    known:1;
139 } osi_linux = { 0, 0, 0, 0};
140
141 static void __init acpi_request_region (struct acpi_generic_address *addr,
142         unsigned int length, char *desc)
143 {
144         struct resource *res;
145
146         if (!addr->address || !length)
147                 return;
148
149         if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
150                 res = request_region(addr->address, length, desc);
151         else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
152                 res = request_mem_region(addr->address, length, desc);
153 }
154
155 static int __init acpi_reserve_resources(void)
156 {
157         acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
158                 "ACPI PM1a_EVT_BLK");
159
160         acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
161                 "ACPI PM1b_EVT_BLK");
162
163         acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
164                 "ACPI PM1a_CNT_BLK");
165
166         acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
167                 "ACPI PM1b_CNT_BLK");
168
169         if (acpi_gbl_FADT.pm_timer_length == 4)
170                 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
171
172         acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
173                 "ACPI PM2_CNT_BLK");
174
175         /* The length of each GPE block must be an even number of bytes */
176
177         if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
178                 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
179                                acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
180
181         if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
182                 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
183                                acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
184
185         return 0;
186 }
187 device_initcall(acpi_reserve_resources);
188
189 acpi_status __init acpi_os_initialize(void)
190 {
191         return AE_OK;
192 }
193
194 static void bind_to_cpu0(struct work_struct *work)
195 {
196         set_cpus_allowed(current, cpumask_of_cpu(0));
197         kfree(work);
198 }
199
200 static void bind_workqueue(struct workqueue_struct *wq)
201 {
202         struct work_struct *work;
203
204         work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
205         INIT_WORK(work, bind_to_cpu0);
206         queue_work(wq, work);
207 }
208
209 acpi_status acpi_os_initialize1(void)
210 {
211         /*
212          * On some machines, a software-initiated SMI causes corruption unless
213          * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
214          * typically it's done in GPE-related methods that are run via
215          * workqueues, so we can avoid the known corruption cases by binding
216          * the workqueues to CPU 0.
217          */
218         kacpid_wq = create_singlethread_workqueue("kacpid");
219         bind_workqueue(kacpid_wq);
220         kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
221         bind_workqueue(kacpi_notify_wq);
222         kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
223         bind_workqueue(kacpi_hotplug_wq);
224         BUG_ON(!kacpid_wq);
225         BUG_ON(!kacpi_notify_wq);
226         BUG_ON(!kacpi_hotplug_wq);
227         return AE_OK;
228 }
229
230 acpi_status acpi_os_terminate(void)
231 {
232         if (acpi_irq_handler) {
233                 acpi_os_remove_interrupt_handler(acpi_irq_irq,
234                                                  acpi_irq_handler);
235         }
236
237         destroy_workqueue(kacpid_wq);
238         destroy_workqueue(kacpi_notify_wq);
239         destroy_workqueue(kacpi_hotplug_wq);
240
241         return AE_OK;
242 }
243
244 void acpi_os_printf(const char *fmt, ...)
245 {
246         va_list args;
247         va_start(args, fmt);
248         acpi_os_vprintf(fmt, args);
249         va_end(args);
250 }
251
252 void acpi_os_vprintf(const char *fmt, va_list args)
253 {
254         static char buffer[512];
255
256         vsprintf(buffer, fmt, args);
257
258 #ifdef ENABLE_DEBUGGER
259         if (acpi_in_debugger) {
260                 kdb_printf("%s", buffer);
261         } else {
262                 printk(KERN_CONT "%s", buffer);
263         }
264 #else
265         printk(KERN_CONT "%s", buffer);
266 #endif
267 }
268
269 acpi_physical_address __init acpi_os_get_root_pointer(void)
270 {
271         if (efi_enabled) {
272                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
273                         return efi.acpi20;
274                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
275                         return efi.acpi;
276                 else {
277                         printk(KERN_ERR PREFIX
278                                "System description tables not found\n");
279                         return 0;
280                 }
281         } else {
282                 acpi_physical_address pa = 0;
283
284                 acpi_find_root_pointer(&pa);
285                 return pa;
286         }
287 }
288
289 void __iomem *__init_refok
290 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
291 {
292         if (phys > ULONG_MAX) {
293                 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
294                 return NULL;
295         }
296         if (acpi_gbl_permanent_mmap)
297                 /*
298                 * ioremap checks to ensure this is in reserved space
299                 */
300                 return ioremap((unsigned long)phys, size);
301         else
302                 return __acpi_map_table((unsigned long)phys, size);
303 }
304 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
305
306 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
307 {
308         if (acpi_gbl_permanent_mmap)
309                 iounmap(virt);
310         else
311                 __acpi_unmap_table(virt, size);
312 }
313 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
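/*
 * A minimal sketch of the map/unmap pairing above, assuming the physical
 * address comes from a table header; example_peek_table() is hypothetical
 * and only illustrates that every acpi_os_map_memory() needs a matching
 * acpi_os_unmap_memory() of the same size.
 */
#if 0	/* illustrative sketch, not built */
static void example_peek_table(acpi_physical_address pa)
{
	struct acpi_table_header __iomem *hdr;

	hdr = acpi_os_map_memory(pa, sizeof(*hdr));
	if (!hdr)
		return;

	printk(KERN_DEBUG PREFIX "table length %u\n", readl(&hdr->length));
	acpi_os_unmap_memory(hdr, sizeof(*hdr));
}
#endif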
314
315 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
316 {
317         if (!acpi_gbl_permanent_mmap)
318                 __acpi_unmap_table(virt, size);
319 }
320
321 #ifdef ACPI_FUTURE_USAGE
322 acpi_status
323 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
324 {
325         if (!phys || !virt)
326                 return AE_BAD_PARAMETER;
327
328         *phys = virt_to_phys(virt);
329
330         return AE_OK;
331 }
332 #endif
333
334 #define ACPI_MAX_OVERRIDE_LEN 100
335
336 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
337
338 acpi_status
339 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
340                             acpi_string * new_val)
341 {
342         if (!init_val || !new_val)
343                 return AE_BAD_PARAMETER;
344
345         *new_val = NULL;
346         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
347                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
348                        acpi_os_name);
349                 *new_val = acpi_os_name;
350         }
351
352         return AE_OK;
353 }
354
355 acpi_status
356 acpi_os_table_override(struct acpi_table_header * existing_table,
357                        struct acpi_table_header ** new_table)
358 {
359         if (!existing_table || !new_table)
360                 return AE_BAD_PARAMETER;
361
362         *new_table = NULL;
363
364 #ifdef CONFIG_ACPI_CUSTOM_DSDT
365         if (strncmp(existing_table->signature, "DSDT", 4) == 0)
366                 *new_table = (struct acpi_table_header *)AmlCode;
367 #endif
368         if (*new_table != NULL) {
369                 printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
370                            "this is unsafe: tainting kernel\n",
371                        existing_table->signature,
372                        existing_table->oem_table_id);
373                 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
374         }
375         return AE_OK;
376 }
377
378 static irqreturn_t acpi_irq(int irq, void *dev_id)
379 {
380         u32 handled;
381
382         handled = (*acpi_irq_handler) (acpi_irq_context);
383
384         if (handled) {
385                 acpi_irq_handled++;
386                 return IRQ_HANDLED;
387         } else {
388                 acpi_irq_not_handled++;
389                 return IRQ_NONE;
390         }
391 }
392
393 acpi_status
394 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
395                                   void *context)
396 {
397         unsigned int irq;
398
399         acpi_irq_stats_init();
400
401         /*
402          * Ignore the GSI from the core, and use the value in our copy of the
403          * FADT. It may not be the same if an interrupt source override exists
404          * for the SCI.
405          */
406         gsi = acpi_gbl_FADT.sci_interrupt;
407         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
408                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
409                        gsi);
410                 return AE_OK;
411         }
412
413         acpi_irq_handler = handler;
414         acpi_irq_context = context;
415         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
416                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
417                 return AE_NOT_ACQUIRED;
418         }
419         acpi_irq_irq = irq;
420
421         return AE_OK;
422 }
423
424 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
425 {
426         if (irq) {
427                 free_irq(irq, acpi_irq);
428                 acpi_irq_handler = NULL;
429                 acpi_irq_irq = 0;
430         }
431
432         return AE_OK;
433 }
434
435 /*
436  * Running in interpreter thread context, safe to sleep
437  */
438
439 void acpi_os_sleep(acpi_integer ms)
440 {
441         schedule_timeout_interruptible(msecs_to_jiffies(ms));
442 }
443
444 void acpi_os_stall(u32 us)
445 {
446         while (us) {
447                 u32 delay = 1000;
448
449                 if (delay > us)
450                         delay = us;
451                 udelay(delay);
452                 touch_nmi_watchdog();
453                 us -= delay;
454         }
455 }
456
457 /*
458  * Support ACPI 3.0 AML Timer operand
459  * Returns 64-bit free-running, monotonically increasing timer
460  * with 100ns granularity
461  */
462 u64 acpi_os_get_timer(void)
463 {
464         static u64 t;
465
466 #ifdef  CONFIG_HPET
467         /* TBD: use HPET if available */
468 #endif
469
470 #ifdef  CONFIG_X86_PM_TIMER
471         /* TBD: default to PM timer if HPET was not available */
472 #endif
473         if (!t)
474                 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
475
476         return ++t;
477 }
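/*
 * A minimal sketch of how the TBD stub above might be filled in using the
 * kernel's monotonic clock (assuming ktime_get() is usable in every context
 * this is called from): ktime_to_ns() yields nanoseconds, and dividing by
 * 100 gives the 100ns units the AML Timer operand expects.
 */
#if 0	/* illustrative sketch, not built */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());

	do_div(time_ns, 100);	/* nanoseconds -> 100ns ticks */
	return time_ns;
}
#endif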
478
479 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
480 {
481         u32 dummy;
482
483         if (!value)
484                 value = &dummy;
485
486         *value = 0;
487         if (width <= 8) {
488                 *(u8 *) value = inb(port);
489         } else if (width <= 16) {
490                 *(u16 *) value = inw(port);
491         } else if (width <= 32) {
492                 *(u32 *) value = inl(port);
493         } else {
494                 BUG();
495         }
496
497         return AE_OK;
498 }
499
500 EXPORT_SYMBOL(acpi_os_read_port);
501
502 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
503 {
504         if (width <= 8) {
505                 outb(value, port);
506         } else if (width <= 16) {
507                 outw(value, port);
508         } else if (width <= 32) {
509                 outl(value, port);
510         } else {
511                 BUG();
512         }
513
514         return AE_OK;
515 }
516
517 EXPORT_SYMBOL(acpi_os_write_port);
518
519 acpi_status
520 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
521 {
522         u32 dummy;
523         void __iomem *virt_addr;
524
525         virt_addr = ioremap(phys_addr, width);
526         if (!value)
527                 value = &dummy;
528
529         switch (width) {
530         case 8:
531                 *(u8 *) value = readb(virt_addr);
532                 break;
533         case 16:
534                 *(u16 *) value = readw(virt_addr);
535                 break;
536         case 32:
537                 *(u32 *) value = readl(virt_addr);
538                 break;
539         default:
540                 BUG();
541         }
542
543         iounmap(virt_addr);
544
545         return AE_OK;
546 }
547
548 acpi_status
549 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
550 {
551         void __iomem *virt_addr;
552
553         virt_addr = ioremap(phys_addr, width);
554
555         switch (width) {
556         case 8:
557                 writeb(value, virt_addr);
558                 break;
559         case 16:
560                 writew(value, virt_addr);
561                 break;
562         case 32:
563                 writel(value, virt_addr);
564                 break;
565         default:
566                 BUG();
567         }
568
569         iounmap(virt_addr);
570
571         return AE_OK;
572 }
573
574 acpi_status
575 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
576                                u32 *value, u32 width)
577 {
578         int result, size;
579
580         if (!value)
581                 return AE_BAD_PARAMETER;
582
583         switch (width) {
584         case 8:
585                 size = 1;
586                 break;
587         case 16:
588                 size = 2;
589                 break;
590         case 32:
591                 size = 4;
592                 break;
593         default:
594                 return AE_ERROR;
595         }
596
597         result = raw_pci_read(pci_id->segment, pci_id->bus,
598                                 PCI_DEVFN(pci_id->device, pci_id->function),
599                                 reg, size, value);
600
601         return (result ? AE_ERROR : AE_OK);
602 }
603
604 acpi_status
605 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
606                                 acpi_integer value, u32 width)
607 {
608         int result, size;
609
610         switch (width) {
611         case 8:
612                 size = 1;
613                 break;
614         case 16:
615                 size = 2;
616                 break;
617         case 32:
618                 size = 4;
619                 break;
620         default:
621                 return AE_ERROR;
622         }
623
624         result = raw_pci_write(pci_id->segment, pci_id->bus,
625                                 PCI_DEVFN(pci_id->device, pci_id->function),
626                                 reg, size, value);
627
628         return (result ? AE_ERROR : AE_OK);
629 }
630
631 /* TODO: Change code to take advantage of driver model more */
632 static void acpi_os_derive_pci_id_2(acpi_handle rhandle,        /* upper bound  */
633                                     acpi_handle chandle,        /* current node */
634                                     struct acpi_pci_id **id,
635                                     int *is_bridge, u8 * bus_number)
636 {
637         acpi_handle handle;
638         struct acpi_pci_id *pci_id = *id;
639         acpi_status status;
640         unsigned long long temp;
641         acpi_object_type type;
642
643         acpi_get_parent(chandle, &handle);
644         if (handle != rhandle) {
645                 acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
646                                         bus_number);
647
648                 status = acpi_get_type(handle, &type);
649                 if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
650                         return;
651
652                 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
653                                           &temp);
654                 if (ACPI_SUCCESS(status)) {
655                         u32 val;
656                         pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
657                         pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
658
659                         if (*is_bridge)
660                                 pci_id->bus = *bus_number;
661
662                         /* any nicer way to get bus number of bridge ? */
663                         status =
664                             acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
665                                                            8);
666                         if (ACPI_SUCCESS(status)
667                             && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
668                                 status =
669                                     acpi_os_read_pci_configuration(pci_id, 0x18,
670                                                                    &val, 8);
671                                 if (!ACPI_SUCCESS(status)) {
672                                         /* Certainly broken...  FIX ME */
673                                         return;
674                                 }
675                                 *is_bridge = 1;
676                                 pci_id->bus = val;
677                                 status =
678                                     acpi_os_read_pci_configuration(pci_id, 0x19,
679                                                                    &val, 8);
680                                 if (ACPI_SUCCESS(status)) {
681                                         *bus_number = val;
682                                 }
683                         } else
684                                 *is_bridge = 0;
685                 }
686         }
687 }
688
689 void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound  */
690                            acpi_handle chandle, /* current node */
691                            struct acpi_pci_id **id)
692 {
693         int is_bridge = 1;
694         u8 bus_number = (*id)->bus;
695
696         acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
697 }
698
699 static void acpi_os_execute_deferred(struct work_struct *work)
700 {
701         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
702         if (!dpc) {
703                 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
704                 return;
705         }
706
707         if (dpc->wait)
708                 acpi_os_wait_events_complete(NULL);
709
710         dpc->function(dpc->context);
711         kfree(dpc);
712
713         return;
714 }
715
716 /*******************************************************************************
717  *
718  * FUNCTION:    acpi_os_execute
719  *
720  * PARAMETERS:  Type               - Type of the callback
721  *              Function           - Function to be executed
722  *              Context            - Function parameters
723  *
724  * RETURN:      Status
725  *
726  * DESCRIPTION: Depending on type, queues the function for deferred execution
727  *              on the appropriate ACPI workqueue.
728  *
729  ******************************************************************************/
730
731 static acpi_status __acpi_os_execute(acpi_execute_type type,
732         acpi_osd_exec_callback function, void *context, int hp)
733 {
734         acpi_status status = AE_OK;
735         struct acpi_os_dpc *dpc;
736         struct workqueue_struct *queue;
737         int ret;
738         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
739                           "Scheduling function [%p(%p)] for deferred execution.\n",
740                           function, context));
741
742         if (!function)
743                 return AE_BAD_PARAMETER;
744
745         /*
746          * Allocate/initialize DPC structure.  Note that this memory will be
747          * freed by the callee.  The kernel handles the work_struct list in a
748          * way that allows us to also free its memory inside the callee.
749          * Because we may want to schedule several tasks with different
750          * parameters we can't use the approach some kernel code uses of
751          * having a static work_struct.
752          */
753
754         dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
755         if (!dpc)
756                 return AE_NO_MEMORY;
757
758         dpc->function = function;
759         dpc->context = context;
760
761         /*
762          * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
763          * because the hotplug code may call driver .remove() functions,
764          * which invoke flush_scheduled_work/acpi_os_wait_events_complete
765          * to flush these workqueues.
766          */
767         queue = hp ? kacpi_hotplug_wq :
768                 (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
769         dpc->wait = hp ? 1 : 0;
770         INIT_WORK(&dpc->work, acpi_os_execute_deferred);
771         ret = queue_work(queue, &dpc->work);
772
773         if (!ret) {
774                 printk(KERN_ERR PREFIX
775                           "Call to queue_work() failed.\n");
776                 status = AE_ERROR;
777                 kfree(dpc);
778         }
779         return status;
780 }
781
782 acpi_status acpi_os_execute(acpi_execute_type type,
783                             acpi_osd_exec_callback function, void *context)
784 {
785         return __acpi_os_execute(type, function, context, 0);
786 }
787 EXPORT_SYMBOL(acpi_os_execute);
788
789 acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
790         void *context)
791 {
792         return __acpi_os_execute(0, function, context, 1);
793 }
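/*
 * A usage sketch for the two entry points above.  The handler and the
 * example_defer() wrapper are hypothetical; OSL_NOTIFY_HANDLER routes the
 * callback to kacpi_notify_wq, any other type to kacpid_wq, and the hotplug
 * variant to kacpi_hotplug_wq with dpc->wait set.
 */
#if 0	/* illustrative sketch, not built */
static void example_notify_handler(void *context)
{
	struct acpi_device *device = context;

	/* runs later in workqueue (process) context and may sleep */
	dev_info(&device->dev, "deferred notify ran\n");
}

static acpi_status example_defer(struct acpi_device *device)
{
	return acpi_os_execute(OSL_NOTIFY_HANDLER,
			       example_notify_handler, device);
}
#endif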
794
795 void acpi_os_wait_events_complete(void *context)
796 {
797         flush_workqueue(kacpid_wq);
798         flush_workqueue(kacpi_notify_wq);
799 }
800
801 EXPORT_SYMBOL(acpi_os_wait_events_complete);
802
803 /*
804  * Allocate the memory for a spinlock and initialize it.
805  */
806 acpi_status acpi_os_create_lock(acpi_spinlock * handle)
807 {
808         spin_lock_init(*handle);
809
810         return AE_OK;
811 }
812
813 /*
814  * Deallocate the memory for a spinlock.
815  */
816 void acpi_os_delete_lock(acpi_spinlock handle)
817 {
818         return;
819 }
820
821 acpi_status
822 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
823 {
824         struct semaphore *sem = NULL;
825
826         sem = acpi_os_allocate(sizeof(struct semaphore));
827         if (!sem)
828                 return AE_NO_MEMORY;
829         memset(sem, 0, sizeof(struct semaphore));
830
831         sema_init(sem, initial_units);
832
833         *handle = (acpi_handle *) sem;
834
835         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
836                           *handle, initial_units));
837
838         return AE_OK;
839 }
840
841 /*
842  * TODO: A better way to delete semaphores?  Linux doesn't have a
843  * 'delete_semaphore()' function -- may result in an invalid
844  * pointer dereference for non-synchronized consumers.  Should
845  * we at least check for blocked threads and signal/cancel them?
846  */
847
848 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
849 {
850         struct semaphore *sem = (struct semaphore *)handle;
851
852         if (!sem)
853                 return AE_BAD_PARAMETER;
854
855         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
856
857         BUG_ON(!list_empty(&sem->wait_list));
858         kfree(sem);
859         sem = NULL;
860
861         return AE_OK;
862 }
863
864 /*
865  * TODO: Support for units > 1?
866  */
867 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
868 {
869         acpi_status status = AE_OK;
870         struct semaphore *sem = (struct semaphore *)handle;
871         long jiffies;
872         int ret = 0;
873
874         if (!sem || (units < 1))
875                 return AE_BAD_PARAMETER;
876
877         if (units > 1)
878                 return AE_SUPPORT;
879
880         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
881                           handle, units, timeout));
882
883         if (timeout == ACPI_WAIT_FOREVER)
884                 jiffies = MAX_SCHEDULE_TIMEOUT;
885         else
886                 jiffies = msecs_to_jiffies(timeout);
887
888         ret = down_timeout(sem, jiffies);
889         if (ret)
890                 status = AE_TIME;
891
892         if (ACPI_FAILURE(status)) {
893                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
894                                   "Failed to acquire semaphore[%p|%d|%d], %s",
895                                   handle, units, timeout,
896                                   acpi_format_exception(status)));
897         } else {
898                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
899                                   "Acquired semaphore[%p|%d|%d]", handle,
900                                   units, timeout));
901         }
902
903         return status;
904 }
905
906 /*
907  * TODO: Support for units > 1?
908  */
909 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
910 {
911         struct semaphore *sem = (struct semaphore *)handle;
912
913         if (!sem || (units < 1))
914                 return AE_BAD_PARAMETER;
915
916         if (units > 1)
917                 return AE_SUPPORT;
918
919         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
920                           units));
921
922         up(sem);
923
924         return AE_OK;
925 }
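/*
 * A round trip through the semaphore OSL calls above, roughly as ACPICA
 * exercises them; example_sem_roundtrip() is hypothetical and the 100ms
 * timeout is arbitrary.
 */
#if 0	/* illustrative sketch, not built */
static acpi_status example_sem_roundtrip(void)
{
	acpi_handle sem;
	acpi_status status;

	status = acpi_os_create_semaphore(1, 1, &sem);	/* max 1, initial 1 */
	if (ACPI_FAILURE(status))
		return status;

	status = acpi_os_wait_semaphore(sem, 1, 100);	/* wait up to 100 ms */
	if (ACPI_SUCCESS(status))
		acpi_os_signal_semaphore(sem, 1);

	return acpi_os_delete_semaphore(sem);
}
#endif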
926
927 #ifdef ACPI_FUTURE_USAGE
928 u32 acpi_os_get_line(char *buffer)
929 {
930
931 #ifdef ENABLE_DEBUGGER
932         if (acpi_in_debugger) {
933                 u32 chars;
934
935                 kdb_read(buffer, sizeof(line_buf));
936
937                 /* remove the CR kdb includes */
938                 chars = strlen(buffer) - 1;
939                 buffer[chars] = '\0';
940         }
941 #endif
942
943         return 0;
944 }
945 #endif                          /*  ACPI_FUTURE_USAGE  */
946
947 acpi_status acpi_os_signal(u32 function, void *info)
948 {
949         switch (function) {
950         case ACPI_SIGNAL_FATAL:
951                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
952                 break;
953         case ACPI_SIGNAL_BREAKPOINT:
954                 /*
955                  * AML Breakpoint
956                  * ACPI spec. says to treat it as a NOP unless
957                  * you are debugging.  So if/when we integrate
958                  * AML debugger into the kernel debugger its
959                  * hook will go here.  But until then it is
960                  * not useful to print anything on breakpoints.
961                  */
962                 break;
963         default:
964                 break;
965         }
966
967         return AE_OK;
968 }
969
970 static int __init acpi_os_name_setup(char *str)
971 {
972         char *p = acpi_os_name;
973         int count = ACPI_MAX_OVERRIDE_LEN - 1;
974
975         if (!str || !*str)
976                 return 0;
977
978         for (; count-- && str && *str; str++) {
979                 if (isalnum(*str) || *str == ' ' || *str == ':')
980                         *p++ = *str;
981                 else if (*str == '\'' || *str == '"')
982                         continue;
983                 else
984                         break;
985         }
986         *p = 0;
987
988         return 1;
989
990 }
991
992 __setup("acpi_os_name=", acpi_os_name_setup);
993
994 static void __init set_osi_linux(unsigned int enable)
995 {
996         if (osi_linux.enable != enable) {
997                 osi_linux.enable = enable;
998                 printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
999                         enable ? "Add": "Delet");
1000         }
1001         return;
1002 }
1003
1004 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1005 {
1006         osi_linux.cmdline = 1;  /* cmdline set the default */
1007         set_osi_linux(enable);
1008
1009         return;
1010 }
1011
1012 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1013 {
1014         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1015
1016         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1017
1018         if (enable == -1)
1019                 return;
1020
1021         osi_linux.known = 1;    /* DMI knows which OSI(Linux) default needed */
1022
1023         set_osi_linux(enable);
1024
1025         return;
1026 }
1027
1028 /*
1029  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1030  *
1031  * an empty string disables _OSI
1032  * a string starting with '!' disables that interface string
1033  * any other string is added to the list, augmenting the built-in strings
1034  */
1035 int __init acpi_osi_setup(char *str)
1036 {
1037         if (str == NULL || *str == '\0') {
1038                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1039                 acpi_gbl_create_osi_method = FALSE;
1040         } else if (!strcmp("!Linux", str)) {
1041                 acpi_cmdline_osi_linux(0);      /* !enable */
1042         } else if (*str == '!') {
1043                 if (acpi_osi_invalidate(++str) == AE_OK)
1044                         printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1045         } else if (!strcmp("Linux", str)) {
1046                 acpi_cmdline_osi_linux(1);      /* enable */
1047         } else if (*osi_additional_string == '\0') {
1048                 strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1049                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1050         }
1051
1052         return 1;
1053 }
1054
1055 __setup("acpi_osi=", acpi_osi_setup);
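/*
 * Example command-line usage of the option above (the interface strings are
 * illustrative; "Windows 2006" stands in for any built-in _OSI string):
 *
 *   acpi_osi=                   disable the _OSI method entirely
 *   acpi_osi=!Linux             never answer TRUE to _OSI(Linux)
 *   acpi_osi=Linux              answer TRUE to _OSI(Linux)
 *   acpi_osi=FreeBSD            add one extra interface string
 *   acpi_osi="!Windows 2006"    delete a built-in interface string
 */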
1056
1057 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1058 static int __init acpi_serialize_setup(char *str)
1059 {
1060         printk(KERN_INFO PREFIX "serialize enabled\n");
1061
1062         acpi_gbl_all_methods_serialized = TRUE;
1063
1064         return 1;
1065 }
1066
1067 __setup("acpi_serialize", acpi_serialize_setup);
1068
1069 /*
1070  * Wake and Run-Time GPES are expected to be separate.
1071  * We disable wake-GPEs at run-time to prevent spurious
1072  * interrupts.
1073  *
1074  * However, if a system exists that shares Wake and
1075  * Run-time events on the same GPE this flag is available
1076  * to tell Linux to keep the wake-time GPEs enabled at run-time.
1077  */
1078 static int __init acpi_wake_gpes_always_on_setup(char *str)
1079 {
1080         printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
1081
1082         acpi_gbl_leave_wake_gpes_disabled = FALSE;
1083
1084         return 1;
1085 }
1086
1087 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1088
1089 /* Check for resource interference between native drivers and ACPI
1090  * OperationRegions (SystemIO and SystemMemory only).
1091  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1092  * in arbitrary AML code and can interfere with legacy drivers.
1093  * acpi_enforce_resources= can be set to:
1094  *
1095  *   - strict (default) (2)
1096  *     -> a driver trying to access the resources will not load
1097  *
1098  *   - lax              (1)
1099  *     -> a driver trying to access the resources will load, but you
1100  *        get a system message warning that something might go wrong
1101  *
1102  *   - no               (0)
1103  *     -> ACPI OperationRegion resources will not be registered
1104  */
1105 #define ENFORCE_RESOURCES_STRICT 2
1106 #define ENFORCE_RESOURCES_LAX    1
1107 #define ENFORCE_RESOURCES_NO     0
1108
1109 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1110
1111 static int __init acpi_enforce_resources_setup(char *str)
1112 {
1113         if (str == NULL || *str == '\0')
1114                 return 0;
1115
1116         if (!strcmp("strict", str))
1117                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1118         else if (!strcmp("lax", str))
1119                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1120         else if (!strcmp("no", str))
1121                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1122
1123         return 1;
1124 }
1125
1126 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1127
1128 /* Check for resource conflicts between ACPI OperationRegions and native
1129  * drivers */
1130 int acpi_check_resource_conflict(struct resource *res)
1131 {
1132         struct acpi_res_list *res_list_elem;
1133         int ioport;
1134         int clash = 0;
1135
1136         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1137                 return 0;
1138         if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1139                 return 0;
1140
1141         ioport = res->flags & IORESOURCE_IO;
1142
1143         spin_lock(&acpi_res_lock);
1144         list_for_each_entry(res_list_elem, &resource_list_head,
1145                             resource_list) {
1146                 if (ioport && (res_list_elem->resource_type
1147                                != ACPI_ADR_SPACE_SYSTEM_IO))
1148                         continue;
1149                 if (!ioport && (res_list_elem->resource_type
1150                                 != ACPI_ADR_SPACE_SYSTEM_MEMORY))
1151                         continue;
1152
1153                 if (res->end < res_list_elem->start
1154                     || res_list_elem->end < res->start)
1155                         continue;
1156                 clash = 1;
1157                 break;
1158         }
1159         spin_unlock(&acpi_res_lock);
1160
1161         if (clash) {
1162                 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1163                         printk("%sACPI: %s resource %s [0x%llx-0x%llx]"
1164                                " conflicts with ACPI region %s"
1165                                " [0x%llx-0x%llx]\n",
1166                                acpi_enforce_resources == ENFORCE_RESOURCES_LAX
1167                                ? KERN_WARNING : KERN_ERR,
1168                                ioport ? "I/O" : "Memory", res->name,
1169                                (long long) res->start, (long long) res->end,
1170                                res_list_elem->name,
1171                                (long long) res_list_elem->start,
1172                                (long long) res_list_elem->end);
1173                         printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
1174                 }
1175                 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1176                         return -EBUSY;
1177         }
1178         return 0;
1179 }
1180 EXPORT_SYMBOL(acpi_check_resource_conflict);
1181
1182 int acpi_check_region(resource_size_t start, resource_size_t n,
1183                       const char *name)
1184 {
1185         struct resource res = {
1186                 .start = start,
1187                 .end   = start + n - 1,
1188                 .name  = name,
1189                 .flags = IORESOURCE_IO,
1190         };
1191
1192         return acpi_check_resource_conflict(&res);
1193 }
1194 EXPORT_SYMBOL(acpi_check_region);
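/*
 * A sketch of how a native driver might use the check above before claiming
 * an I/O range; the base address, length and "example_drv" name are
 * hypothetical.  Under acpi_enforce_resources=lax the conflict is only
 * logged and the call returns 0.
 */
#if 0	/* illustrative sketch, not built */
static int example_claim_ports(unsigned long base)
{
	/* refuse the range if it overlaps an ACPI OperationRegion */
	if (acpi_check_region(base, 8, "example_drv"))
		return -EBUSY;

	if (!request_region(base, 8, "example_drv"))
		return -EBUSY;

	return 0;
}
#endif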
1195
1196 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1197                       const char *name)
1198 {
1199         struct resource res = {
1200                 .start = start,
1201                 .end   = start + n - 1,
1202                 .name  = name,
1203                 .flags = IORESOURCE_MEM,
1204         };
1205
1206         return acpi_check_resource_conflict(&res);
1207
1208 }
1209 EXPORT_SYMBOL(acpi_check_mem_region);
1210
1211 /*
1212  * Acquire a spinlock.
1213  *
1214  * handle is a pointer to the spinlock_t.
1215  */
1216
1217 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1218 {
1219         acpi_cpu_flags flags;
1220         spin_lock_irqsave(lockp, flags);
1221         return flags;
1222 }
1223
1224 /*
1225  * Release a spinlock. See above.
1226  */
1227
1228 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1229 {
1230         spin_unlock_irqrestore(lockp, flags);
1231 }
1232
1233 #ifndef ACPI_USE_LOCAL_CACHE
1234
1235 /*******************************************************************************
1236  *
1237  * FUNCTION:    acpi_os_create_cache
1238  *
1239  * PARAMETERS:  name      - Ascii name for the cache
1240  *              size      - Size of each cached object
1241  *              depth     - Maximum depth of the cache (in objects) <ignored>
1242  *              cache     - Where the new cache object is returned
1243  *
1244  * RETURN:      status
1245  *
1246  * DESCRIPTION: Create a cache object
1247  *
1248  ******************************************************************************/
1249
1250 acpi_status
1251 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1252 {
1253         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1254         if (*cache == NULL)
1255                 return AE_ERROR;
1256         else
1257                 return AE_OK;
1258 }
1259
1260 /*******************************************************************************
1261  *
1262  * FUNCTION:    acpi_os_purge_cache
1263  *
1264  * PARAMETERS:  Cache           - Handle to cache object
1265  *
1266  * RETURN:      Status
1267  *
1268  * DESCRIPTION: Free all objects within the requested cache.
1269  *
1270  ******************************************************************************/
1271
1272 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1273 {
1274         kmem_cache_shrink(cache);
1275         return (AE_OK);
1276 }
1277
1278 /*******************************************************************************
1279  *
1280  * FUNCTION:    acpi_os_delete_cache
1281  *
1282  * PARAMETERS:  Cache           - Handle to cache object
1283  *
1284  * RETURN:      Status
1285  *
1286  * DESCRIPTION: Free all objects within the requested cache and delete the
1287  *              cache object.
1288  *
1289  ******************************************************************************/
1290
1291 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1292 {
1293         kmem_cache_destroy(cache);
1294         return (AE_OK);
1295 }
1296
1297 /*******************************************************************************
1298  *
1299  * FUNCTION:    acpi_os_release_object
1300  *
1301  * PARAMETERS:  Cache       - Handle to cache object
1302  *              Object      - The object to be released
1303  *
1304  * RETURN:      None
1305  *
1306  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1307  *              the object is deleted.
1308  *
1309  ******************************************************************************/
1310
1311 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1312 {
1313         kmem_cache_free(cache, object);
1314         return (AE_OK);
1315 }
1316
1317 /******************************************************************************
1318  *
1319  * FUNCTION:    acpi_os_validate_interface
1320  *
1321  * PARAMETERS:  interface           - Requested interface to be validated
1322  *
1323  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1324  *
1325  * DESCRIPTION: Match an interface string to the interfaces supported by the
1326  *              host. Strings originate from an AML call to the _OSI method.
1327  *
1328  *****************************************************************************/
1329
1330 acpi_status
1331 acpi_os_validate_interface (char *interface)
1332 {
1333         if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1334                 return AE_OK;
1335         if (!strcmp("Linux", interface)) {
1336
1337                 printk(KERN_NOTICE PREFIX
1338                         "BIOS _OSI(Linux) query %s%s\n",
1339                         osi_linux.enable ? "honored" : "ignored",
1340                         osi_linux.cmdline ? " via cmdline" :
1341                         osi_linux.dmi ? " via DMI" : "");
1342
1343                 if (osi_linux.enable)
1344                         return AE_OK;
1345         }
1346         return AE_SUPPORT;
1347 }
1348
1349 static inline int acpi_res_list_add(struct acpi_res_list *res)
1350 {
1351         struct acpi_res_list *res_list_elem;
1352
1353         list_for_each_entry(res_list_elem, &resource_list_head,
1354                             resource_list) {
1355
1356                 if (res->resource_type == res_list_elem->resource_type &&
1357                     res->start == res_list_elem->start &&
1358                     res->end == res_list_elem->end) {
1359
1360                         /*
1361                          * The Region(addr,len) already exists in the list,
1362                          * just increase the count
1363                          */
1364
1365                         res_list_elem->count++;
1366                         return 0;
1367                 }
1368         }
1369
1370         res->count = 1;
1371         list_add(&res->resource_list, &resource_list_head);
1372         return 1;
1373 }
1374
1375 static inline void acpi_res_list_del(struct acpi_res_list *res)
1376 {
1377         struct acpi_res_list *res_list_elem;
1378
1379         list_for_each_entry(res_list_elem, &resource_list_head,
1380                             resource_list) {
1381
1382                 if (res->resource_type == res_list_elem->resource_type &&
1383                     res->start == res_list_elem->start &&
1384                     res->end == res_list_elem->end) {
1385
1386                         /*
1387                          * If the res count is decreased to 0,
1388                          * remove and free it
1389                          */
1390
1391                         if (--res_list_elem->count == 0) {
1392                                 list_del(&res_list_elem->resource_list);
1393                                 kfree(res_list_elem);
1394                         }
1395                         return;
1396                 }
1397         }
1398 }
1399
1400 acpi_status
1401 acpi_os_invalidate_address(
1402     u8                   space_id,
1403     acpi_physical_address   address,
1404     acpi_size               length)
1405 {
1406         struct acpi_res_list res;
1407
1408         switch (space_id) {
1409         case ACPI_ADR_SPACE_SYSTEM_IO:
1410         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1411                 /* Only interference checks against SystemIO and SystemMemory
1412                    are needed */
1413                 res.start = address;
1414                 res.end = address + length - 1;
1415                 res.resource_type = space_id;
1416                 spin_lock(&acpi_res_lock);
1417                 acpi_res_list_del(&res);
1418                 spin_unlock(&acpi_res_lock);
1419                 break;
1420         case ACPI_ADR_SPACE_PCI_CONFIG:
1421         case ACPI_ADR_SPACE_EC:
1422         case ACPI_ADR_SPACE_SMBUS:
1423         case ACPI_ADR_SPACE_CMOS:
1424         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1425         case ACPI_ADR_SPACE_DATA_TABLE:
1426         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1427                 break;
1428         }
1429         return AE_OK;
1430 }
1431
1432 /******************************************************************************
1433  *
1434  * FUNCTION:    acpi_os_validate_address
1435  *
1436  * PARAMETERS:  space_id             - ACPI space ID
1437  *              address             - Physical address
1438  *              length              - Address length
1439  *
1440  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1441  *              should return AE_AML_ILLEGAL_ADDRESS.
1442  *
1443  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1444  *              the addresses accessed by AML operation regions.
1445  *
1446  *****************************************************************************/
1447
1448 acpi_status
1449 acpi_os_validate_address (
1450     u8                   space_id,
1451     acpi_physical_address   address,
1452     acpi_size               length,
1453     char *name)
1454 {
1455         struct acpi_res_list *res;
1456         int added;
1457         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1458                 return AE_OK;
1459
1460         switch (space_id) {
1461         case ACPI_ADR_SPACE_SYSTEM_IO:
1462         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1463                 /* Only interference checks against SystemIO and SystemMemory
1464                    are needed */
1465                 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1466                 if (!res)
1467                         return AE_OK;
1468                 /* ACPI names are fixed at 4 bytes; still, better to use strlcpy */
1469                 strlcpy(res->name, name, 5);
1470                 res->start = address;
1471                 res->end = address + length - 1;
1472                 res->resource_type = space_id;
1473                 spin_lock(&acpi_res_lock);
1474                 added = acpi_res_list_add(res);
1475                 spin_unlock(&acpi_res_lock);
1476                 pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
1477                          "name: %s\n", added ? "Added" : "Already exists",
1478                          (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1479                          ? "SystemIO" : "System Memory",
1480                          (unsigned long long)res->start,
1481                          (unsigned long long)res->end,
1482                          res->name);
1483                 if (!added)
1484                         kfree(res);
1485                 break;
1486         case ACPI_ADR_SPACE_PCI_CONFIG:
1487         case ACPI_ADR_SPACE_EC:
1488         case ACPI_ADR_SPACE_SMBUS:
1489         case ACPI_ADR_SPACE_CMOS:
1490         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1491         case ACPI_ADR_SPACE_DATA_TABLE:
1492         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1493                 break;
1494         }
1495         return AE_OK;
1496 }
1497
1498 #endif