1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; if not, write to the Free Software
24  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *
28  */
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/mm.h>
34 #include <linux/pci.h>
35 #include <linux/interrupt.h>
36 #include <linux/kmod.h>
37 #include <linux/delay.h>
38 #include <linux/workqueue.h>
39 #include <linux/nmi.h>
40 #include <linux/acpi.h>
41 #include <linux/efi.h>
42 #include <linux/ioport.h>
43 #include <linux/list.h>
44 #include <linux/jiffies.h>
45 #include <linux/semaphore.h>
46
47 #include <asm/io.h>
48 #include <asm/uaccess.h>
49
50 #include <acpi/acpi.h>
51 #include <acpi/acpi_bus.h>
52 #include <acpi/processor.h>
53
54 #define _COMPONENT              ACPI_OS_SERVICES
55 ACPI_MODULE_NAME("osl");
56 #define PREFIX          "ACPI: "
57 struct acpi_os_dpc {
58         acpi_osd_exec_callback function;
59         void *context;
60         struct work_struct work;
61         int wait;
62 };
63
64 #ifdef CONFIG_ACPI_CUSTOM_DSDT
65 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
66 #endif
67
68 #ifdef ENABLE_DEBUGGER
69 #include <linux/kdb.h>
70
71 /* stuff for debugger support */
72 int acpi_in_debugger;
73 EXPORT_SYMBOL(acpi_in_debugger);
74
75 extern char line_buf[80];
76 #endif                          /*ENABLE_DEBUGGER */
77
78 static unsigned int acpi_irq_irq;
79 static acpi_osd_handler acpi_irq_handler;
80 static void *acpi_irq_context;
81 static struct workqueue_struct *kacpid_wq;
82 static struct workqueue_struct *kacpi_notify_wq;
83 static struct workqueue_struct *kacpi_hotplug_wq;
84
85 struct acpi_res_list {
86         resource_size_t start;
87         resource_size_t end;
88         acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
89         char name[5];   /* ACPI names are at most 4 chars; keeping them here
90                            instead of in res->name avoids a separate kmalloc */
91         struct list_head resource_list;
92         int count;
93 };
94
95 static LIST_HEAD(resource_list_head);
96 static DEFINE_SPINLOCK(acpi_res_lock);
97
98 #define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
99 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
100
101 /*
102  * The story of _OSI(Linux)
103  *
104  * From pre-history through Linux-2.6.22,
105  * Linux responded TRUE upon a BIOS OSI(Linux) query.
106  *
107  * Unfortunately, reference BIOS writers got wind of this
108  * and put OSI(Linux) in their example code, quickly exposing
109  * this string as ill-conceived and opening the door to
110  * an un-bounded number of BIOS incompatibilities.
111  *
112  * For example, OSI(Linux) was used on resume to re-POST a
113  * video card on one system, because Linux at that time
114  * could not do a speedy restore in its native driver.
115  * But then upon gaining quick native restore capability,
116  * Linux has no way to tell the BIOS to skip the time-consuming
117  * POST -- putting Linux at a permanent performance disadvantage.
118  * On another system, the BIOS writer used OSI(Linux)
119  * to infer native OS support for IPMI!  On other systems,
120  * OSI(Linux) simply got in the way of Linux claiming to
121  * be compatible with other operating systems, exposing
122  * BIOS issues such as skipped device initialization.
123  *
124  * So "Linux" turned out to be a really poor choice of
125  * OSI string, and from Linux-2.6.23 onward we respond FALSE.
126  *
127  * BIOS writers should NOT query _OSI(Linux) on future systems.
128  * Linux will complain on the console when it sees it, and return FALSE.
129  * Getting Linux to return TRUE for your system requires either
130  * a kernel source update to add a DMI entry,
131  * or booting with "acpi_osi=Linux".
132  */
133
134 static struct osi_linux {
135         unsigned int    enable:1;
136         unsigned int    dmi:1;
137         unsigned int    cmdline:1;
138         unsigned int    known:1;
139 } osi_linux = { 0, 0, 0, 0};
140
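/*
 * Reserve the I/O port or memory range described by an ACPI generic
 * address structure (used below for the FADT register blocks), so the
 * range shows up in /proc/ioports or /proc/iomem as owned by ACPI.
 */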
141 static void __init acpi_request_region (struct acpi_generic_address *addr,
142         unsigned int length, char *desc)
143 {
144         struct resource *res;
145
146         if (!addr->address || !length)
147                 return;
148
149         if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
150                 res = request_region(addr->address, length, desc);
151         else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
152                 res = request_mem_region(addr->address, length, desc);
153 }
154
155 static int __init acpi_reserve_resources(void)
156 {
157         acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
158                 "ACPI PM1a_EVT_BLK");
159
160         acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
161                 "ACPI PM1b_EVT_BLK");
162
163         acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
164                 "ACPI PM1a_CNT_BLK");
165
166         acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
167                 "ACPI PM1b_CNT_BLK");
168
169         if (acpi_gbl_FADT.pm_timer_length == 4)
170                 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
171
172         acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
173                 "ACPI PM2_CNT_BLK");
174
175         /* Length of GPE blocks must be a non-negative multiple of 2 */
176
177         if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
178                 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
179                                acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
180
181         if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
182                 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
183                                acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
184
185         return 0;
186 }
187 device_initcall(acpi_reserve_resources);
188
189 acpi_status __init acpi_os_initialize(void)
190 {
191         return AE_OK;
192 }
193
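/*
 * Helpers used below to pin the ACPI workqueue threads to CPU 0:
 * bind_workqueue() queues a one-shot work item on the target queue,
 * and bind_to_cpu0(), running in that queue's thread, restricts the
 * thread's CPU affinity to CPU 0 and frees the work item.
 */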
194 static void bind_to_cpu0(struct work_struct *work)
195 {
196         set_cpus_allowed_ptr(current, cpumask_of(0));
197         kfree(work);
198 }
199
200 static void bind_workqueue(struct workqueue_struct *wq)
201 {
202         struct work_struct *work;
203
204         work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
205         INIT_WORK(work, bind_to_cpu0);
206         queue_work(wq, work);
207 }
208
209 acpi_status acpi_os_initialize1(void)
210 {
211         /*
212          * On some machines, a software-initiated SMI causes corruption unless
213          * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
214          * typically it's done in GPE-related methods that are run via
215          * workqueues, so we can avoid the known corruption cases by binding
216          * the workqueues to CPU 0.
217          */
218         kacpid_wq = create_singlethread_workqueue("kacpid");
219         bind_workqueue(kacpid_wq);
220         kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
221         bind_workqueue(kacpi_notify_wq);
222         kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
223         bind_workqueue(kacpi_hotplug_wq);
224         BUG_ON(!kacpid_wq);
225         BUG_ON(!kacpi_notify_wq);
226         BUG_ON(!kacpi_hotplug_wq);
227         return AE_OK;
228 }
229
230 acpi_status acpi_os_terminate(void)
231 {
232         if (acpi_irq_handler) {
233                 acpi_os_remove_interrupt_handler(acpi_irq_irq,
234                                                  acpi_irq_handler);
235         }
236
237         destroy_workqueue(kacpid_wq);
238         destroy_workqueue(kacpi_notify_wq);
239         destroy_workqueue(kacpi_hotplug_wq);
240
241         return AE_OK;
242 }
243
244 void acpi_os_printf(const char *fmt, ...)
245 {
246         va_list args;
247         va_start(args, fmt);
248         acpi_os_vprintf(fmt, args);
249         va_end(args);
250 }
251
252 void acpi_os_vprintf(const char *fmt, va_list args)
253 {
254         static char buffer[512];
255
256         vsprintf(buffer, fmt, args);
257
258 #ifdef ENABLE_DEBUGGER
259         if (acpi_in_debugger) {
260                 kdb_printf("%s", buffer);
261         } else {
262                 printk(KERN_CONT "%s", buffer);
263         }
264 #else
265         printk(KERN_CONT "%s", buffer);
266 #endif
267 }
268
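/*
 * Return the physical address of the RSDP: prefer the ACPI 2.0 pointer
 * published by EFI (efi.acpi20), fall back to the ACPI 1.0 pointer
 * (efi.acpi), and on non-EFI systems let ACPICA scan for the RSDP.
 */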
269 acpi_physical_address __init acpi_os_get_root_pointer(void)
270 {
271         if (efi_enabled) {
272                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
273                         return efi.acpi20;
274                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
275                         return efi.acpi;
276                 else {
277                         printk(KERN_ERR PREFIX
278                                "System description tables not found\n");
279                         return 0;
280                 }
281         } else {
282                 acpi_physical_address pa = 0;
283
284                 acpi_find_root_pointer(&pa);
285                 return pa;
286         }
287 }
288
289 void __iomem *__init_refok
290 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
291 {
292         if (phys > ULONG_MAX) {
293                 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
294                 return NULL;
295         }
296         if (acpi_gbl_permanent_mmap)
297                 /*
298                 * ioremap checks to ensure this is in reserved space
299                 */
300                 return ioremap((unsigned long)phys, size);
301         else
302                 return __acpi_map_table((unsigned long)phys, size);
303 }
304 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
305
306 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
307 {
308         if (acpi_gbl_permanent_mmap)
309                 iounmap(virt);
310         else
311                 __acpi_unmap_table(virt, size);
312 }
313 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
314
315 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
316 {
317         if (!acpi_gbl_permanent_mmap)
318                 __acpi_unmap_table(virt, size);
319 }
320
321 #ifdef ACPI_FUTURE_USAGE
322 acpi_status
323 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
324 {
325         if (!phys || !virt)
326                 return AE_BAD_PARAMETER;
327
328         *phys = virt_to_phys(virt);
329
330         return AE_OK;
331 }
332 #endif
333
334 #define ACPI_MAX_OVERRIDE_LEN 100
335
336 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
337
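/*
 * If the user overrode the OS name with the "acpi_os_name=" boot
 * parameter, report that string for the predefined _OS_ object.
 */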
338 acpi_status
339 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
340                             acpi_string * new_val)
341 {
342         if (!init_val || !new_val)
343                 return AE_BAD_PARAMETER;
344
345         *new_val = NULL;
346         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
347                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
348                        acpi_os_name);
349                 *new_val = acpi_os_name;
350         }
351
352         return AE_OK;
353 }
354
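/*
 * When CONFIG_ACPI_CUSTOM_DSDT is set, substitute the DSDT compiled
 * into the kernel for the one supplied by the BIOS, tainting the
 * kernel since such an override is unsafe.
 */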
355 acpi_status
356 acpi_os_table_override(struct acpi_table_header * existing_table,
357                        struct acpi_table_header ** new_table)
358 {
359         if (!existing_table || !new_table)
360                 return AE_BAD_PARAMETER;
361
362         *new_table = NULL;
363
364 #ifdef CONFIG_ACPI_CUSTOM_DSDT
365         if (strncmp(existing_table->signature, "DSDT", 4) == 0)
366                 *new_table = (struct acpi_table_header *)AmlCode;
367 #endif
368         if (*new_table != NULL) {
369                 printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
370                            "this is unsafe: tainting kernel\n",
371                        existing_table->signature,
372                        existing_table->oem_table_id);
373                 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
374         }
375         return AE_OK;
376 }
377
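/*
 * SCI interrupt handler: dispatch to the handler ACPICA installed via
 * acpi_os_install_interrupt_handler() and update the handled/not-handled
 * statistics.
 */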
378 static irqreturn_t acpi_irq(int irq, void *dev_id)
379 {
380         u32 handled;
381
382         handled = (*acpi_irq_handler) (acpi_irq_context);
383
384         if (handled) {
385                 acpi_irq_handled++;
386                 return IRQ_HANDLED;
387         } else {
388                 acpi_irq_not_handled++;
389                 return IRQ_NONE;
390         }
391 }
392
393 acpi_status
394 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
395                                   void *context)
396 {
397         unsigned int irq;
398
399         acpi_irq_stats_init();
400
401         /*
402          * Ignore the GSI from the core, and use the value in our copy of the
403          * FADT. It may not be the same if an interrupt source override exists
404          * for the SCI.
405          */
406         gsi = acpi_gbl_FADT.sci_interrupt;
407         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
408                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
409                        gsi);
410                 return AE_OK;
411         }
412
413         acpi_irq_handler = handler;
414         acpi_irq_context = context;
415         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
416                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
417                 return AE_NOT_ACQUIRED;
418         }
419         acpi_irq_irq = irq;
420
421         return AE_OK;
422 }
423
424 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
425 {
426         if (irq) {
427                 free_irq(irq, acpi_irq);
428                 acpi_irq_handler = NULL;
429                 acpi_irq_irq = 0;
430         }
431
432         return AE_OK;
433 }
434
435 /*
436  * Running in interpreter thread context, safe to sleep
437  */
438
439 void acpi_os_sleep(u64 ms)
440 {
441         schedule_timeout_interruptible(msecs_to_jiffies(ms));
442 }
443
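/*
 * Busy-wait for the requested number of microseconds in chunks of at
 * most 1 ms, poking the NMI watchdog between chunks so that long
 * stalls do not trigger lockup warnings.
 */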
444 void acpi_os_stall(u32 us)
445 {
446         while (us) {
447                 u32 delay = 1000;
448
449                 if (delay > us)
450                         delay = us;
451                 udelay(delay);
452                 touch_nmi_watchdog();
453                 us -= delay;
454         }
455 }
456
457 /*
458  * Support ACPI 3.0 AML Timer operand
459  * Returns 64-bit free-running, monotonically increasing timer
460  * with 100ns granularity
461  */
462 u64 acpi_os_get_timer(void)
463 {
464         static u64 t;
465
466 #ifdef  CONFIG_HPET
467         /* TBD: use HPET if available */
468 #endif
469
470 #ifdef  CONFIG_X86_PM_TIMER
471         /* TBD: default to PM timer if HPET was not available */
472 #endif
473         if (!t)
474                 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
475
476         return ++t;
477 }
478
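/*
 * Port I/O for the interpreter: the access width (in bits) selects
 * inb/inw/inl below, and outb/outw/outl in the write path.
 */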
479 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
480 {
481         u32 dummy;
482
483         if (!value)
484                 value = &dummy;
485
486         *value = 0;
487         if (width <= 8) {
488                 *(u8 *) value = inb(port);
489         } else if (width <= 16) {
490                 *(u16 *) value = inw(port);
491         } else if (width <= 32) {
492                 *(u32 *) value = inl(port);
493         } else {
494                 BUG();
495         }
496
497         return AE_OK;
498 }
499
500 EXPORT_SYMBOL(acpi_os_read_port);
501
502 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
503 {
504         if (width <= 8) {
505                 outb(value, port);
506         } else if (width <= 16) {
507                 outw(value, port);
508         } else if (width <= 32) {
509                 outl(value, port);
510         } else {
511                 BUG();
512         }
513
514         return AE_OK;
515 }
516
517 EXPORT_SYMBOL(acpi_os_write_port);
518
519 acpi_status
520 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
521 {
522         u32 dummy;
523         void __iomem *virt_addr;
524
525         virt_addr = ioremap(phys_addr, width);
526         if (!value)
527                 value = &dummy;
528
529         switch (width) {
530         case 8:
531                 *(u8 *) value = readb(virt_addr);
532                 break;
533         case 16:
534                 *(u16 *) value = readw(virt_addr);
535                 break;
536         case 32:
537                 *(u32 *) value = readl(virt_addr);
538                 break;
539         default:
540                 BUG();
541         }
542
543         iounmap(virt_addr);
544
545         return AE_OK;
546 }
547
548 acpi_status
549 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
550 {
551         void __iomem *virt_addr;
552
553         virt_addr = ioremap(phys_addr, width);
554
555         switch (width) {
556         case 8:
557                 writeb(value, virt_addr);
558                 break;
559         case 16:
560                 writew(value, virt_addr);
561                 break;
562         case 32:
563                 writel(value, virt_addr);
564                 break;
565         default:
566                 BUG();
567         }
568
569         iounmap(virt_addr);
570
571         return AE_OK;
572 }
573
574 acpi_status
575 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
576                                u32 *value, u32 width)
577 {
578         int result, size;
579
580         if (!value)
581                 return AE_BAD_PARAMETER;
582
583         switch (width) {
584         case 8:
585                 size = 1;
586                 break;
587         case 16:
588                 size = 2;
589                 break;
590         case 32:
591                 size = 4;
592                 break;
593         default:
594                 return AE_ERROR;
595         }
596
597         result = raw_pci_read(pci_id->segment, pci_id->bus,
598                                 PCI_DEVFN(pci_id->device, pci_id->function),
599                                 reg, size, value);
600
601         return (result ? AE_ERROR : AE_OK);
602 }
603
604 acpi_status
605 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
606                                 u64 value, u32 width)
607 {
608         int result, size;
609
610         switch (width) {
611         case 8:
612                 size = 1;
613                 break;
614         case 16:
615                 size = 2;
616                 break;
617         case 32:
618                 size = 4;
619                 break;
620         default:
621                 return AE_ERROR;
622         }
623
624         result = raw_pci_write(pci_id->segment, pci_id->bus,
625                                 PCI_DEVFN(pci_id->device, pci_id->function),
626                                 reg, size, value);
627
628         return (result ? AE_ERROR : AE_OK);
629 }
630
631 /* TODO: Change code to take advantage of driver model more */
632 static void acpi_os_derive_pci_id_2(acpi_handle rhandle,        /* upper bound  */
633                                     acpi_handle chandle,        /* current node */
634                                     struct acpi_pci_id **id,
635                                     int *is_bridge, u8 * bus_number)
636 {
637         acpi_handle handle;
638         struct acpi_pci_id *pci_id = *id;
639         acpi_status status;
640         unsigned long long temp;
641         acpi_object_type type;
642
643         acpi_get_parent(chandle, &handle);
644         if (handle != rhandle) {
645                 acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
646                                         bus_number);
647
648                 status = acpi_get_type(handle, &type);
649                 if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
650                         return;
651
652                 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
653                                           &temp);
654                 if (ACPI_SUCCESS(status)) {
655                         u32 val;
656                         pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
657                         pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
658
659                         if (*is_bridge)
660                                 pci_id->bus = *bus_number;
661
662                         /* any nicer way to get bus number of bridge ? */
663                         status =
664                             acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
665                                                            8);
666                         if (ACPI_SUCCESS(status)
667                             && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
668                                 status =
669                                     acpi_os_read_pci_configuration(pci_id, 0x18,
670                                                                    &val, 8);
671                                 if (!ACPI_SUCCESS(status)) {
672                                         /* Certainly broken...  FIX ME */
673                                         return;
674                                 }
675                                 *is_bridge = 1;
676                                 pci_id->bus = val;
677                                 status =
678                                     acpi_os_read_pci_configuration(pci_id, 0x19,
679                                                                    &val, 8);
680                                 if (ACPI_SUCCESS(status)) {
681                                         *bus_number = val;
682                                 }
683                         } else
684                                 *is_bridge = 0;
685                 }
686         }
687 }
688
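/*
 * Derive the PCI bus/device/function for a namespace node by walking
 * up from the node to the PCI root, evaluating _ADR at each level and
 * reading bridge config space to follow secondary bus numbers.
 */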
689 void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound  */
690                            acpi_handle chandle, /* current node */
691                            struct acpi_pci_id **id)
692 {
693         int is_bridge = 1;
694         u8 bus_number = (*id)->bus;
695
696         acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
697 }
698
699 static void acpi_os_execute_deferred(struct work_struct *work)
700 {
701         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
702
703         if (dpc->wait)
704                 acpi_os_wait_events_complete(NULL);
705
706         dpc->function(dpc->context);
707         kfree(dpc);
708 }
709
710 /*******************************************************************************
711  *
712  * FUNCTION:    acpi_os_execute
713  *
714  * PARAMETERS:  Type               - Type of the callback
715  *              Function           - Function to be executed
716  *              Context            - Function parameters
717  *
718  * RETURN:      Status
719  *
720  * DESCRIPTION: Depending on type, either queues function for deferred execution or
721  *              immediately executes function on a separate thread.
722  *
723  ******************************************************************************/
724
725 static acpi_status __acpi_os_execute(acpi_execute_type type,
726         acpi_osd_exec_callback function, void *context, int hp)
727 {
728         acpi_status status = AE_OK;
729         struct acpi_os_dpc *dpc;
730         struct workqueue_struct *queue;
731         int ret;
732         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
733                           "Scheduling function [%p(%p)] for deferred execution.\n",
734                           function, context));
735
736         /*
737          * Allocate/initialize DPC structure.  Note that this memory will be
738          * freed by the callee.  The kernel handles the work_struct list in a
739          * way that allows us to also free its memory inside the callee.
740          * Because we may want to schedule several tasks with different
741          * parameters, we can't use the approach some kernel code takes of
742          * having a static work_struct.
743          */
744
745         dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
746         if (!dpc)
747                 return AE_NO_MEMORY;
748
749         dpc->function = function;
750         dpc->context = context;
751
752         /*
753          * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
754          * because the hotplug code may call driver .remove() functions,
755          * which invoke flush_scheduled_work/acpi_os_wait_events_complete
756          * to flush these workqueues.
757          */
758         queue = hp ? kacpi_hotplug_wq :
759                 (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
760         dpc->wait = hp ? 1 : 0;
761
762         /*
763          * All three queues use the same deferred-execution callback,
764          * acpi_os_execute_deferred(); only the destination workqueue
765          * differs, so a single INIT_WORK() suffices.
766          */
767         INIT_WORK(&dpc->work, acpi_os_execute_deferred);
768
769         ret = queue_work(queue, &dpc->work);
770
771         if (!ret) {
772                 printk(KERN_ERR PREFIX
773                           "Call to queue_work() failed.\n");
774                 status = AE_ERROR;
775                 kfree(dpc);
776         }
777         return status;
778 }
779
780 acpi_status acpi_os_execute(acpi_execute_type type,
781                             acpi_osd_exec_callback function, void *context)
782 {
783         return __acpi_os_execute(type, function, context, 0);
784 }
785 EXPORT_SYMBOL(acpi_os_execute);
786
787 acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
788         void *context)
789 {
790         return __acpi_os_execute(0, function, context, 1);
791 }
792
793 void acpi_os_wait_events_complete(void *context)
794 {
795         flush_workqueue(kacpid_wq);
796         flush_workqueue(kacpi_notify_wq);
797 }
798
799 EXPORT_SYMBOL(acpi_os_wait_events_complete);
800
801 /*
802  * Initialize a spinlock for use by the ACPICA core.
803  */
804 acpi_status acpi_os_create_lock(acpi_spinlock * handle)
805 {
806         spin_lock_init(*handle);
807
808         return AE_OK;
809 }
810
811 /*
812  * Delete a spinlock -- no memory of ours to free, so nothing to do.
813  */
814 void acpi_os_delete_lock(acpi_spinlock handle)
815 {
816         return;
817 }
818
819 acpi_status
820 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
821 {
822         struct semaphore *sem = NULL;
823
824         sem = acpi_os_allocate(sizeof(struct semaphore));
825         if (!sem)
826                 return AE_NO_MEMORY;
827         memset(sem, 0, sizeof(struct semaphore));
828
829         sema_init(sem, initial_units);
830
831         *handle = (acpi_handle *) sem;
832
833         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
834                           *handle, initial_units));
835
836         return AE_OK;
837 }
838
839 /*
840  * TODO: A better way to delete semaphores?  Linux doesn't have a
841  * 'delete_semaphore()' function -- may result in an invalid
842  * pointer dereference for non-synchronized consumers.  Should
843  * we at least check for blocked threads and signal/cancel them?
844  */
845
846 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
847 {
848         struct semaphore *sem = (struct semaphore *)handle;
849
850         if (!sem)
851                 return AE_BAD_PARAMETER;
852
853         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
854
855         BUG_ON(!list_empty(&sem->wait_list));
856         kfree(sem);
857         sem = NULL;
858
859         return AE_OK;
860 }
861
862 /*
863  * TODO: Support for units > 1?
864  */
865 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
866 {
867         acpi_status status = AE_OK;
868         struct semaphore *sem = (struct semaphore *)handle;
869         long jiffies;
870         int ret = 0;
871
872         if (!sem || (units < 1))
873                 return AE_BAD_PARAMETER;
874
875         if (units > 1)
876                 return AE_SUPPORT;
877
878         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
879                           handle, units, timeout));
880
881         if (timeout == ACPI_WAIT_FOREVER)
882                 jiffies = MAX_SCHEDULE_TIMEOUT;
883         else
884                 jiffies = msecs_to_jiffies(timeout);
885         
886         ret = down_timeout(sem, jiffies);
887         if (ret)
888                 status = AE_TIME;
889
890         if (ACPI_FAILURE(status)) {
891                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
892                                   "Failed to acquire semaphore[%p|%d|%d], %s",
893                                   handle, units, timeout,
894                                   acpi_format_exception(status)));
895         } else {
896                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
897                                   "Acquired semaphore[%p|%d|%d]", handle,
898                                   units, timeout));
899         }
900
901         return status;
902 }
903
904 /*
905  * TODO: Support for units > 1?
906  */
907 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
908 {
909         struct semaphore *sem = (struct semaphore *)handle;
910
911         if (!sem || (units < 1))
912                 return AE_BAD_PARAMETER;
913
914         if (units > 1)
915                 return AE_SUPPORT;
916
917         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
918                           units));
919
920         up(sem);
921
922         return AE_OK;
923 }
924
925 #ifdef ACPI_FUTURE_USAGE
926 u32 acpi_os_get_line(char *buffer)
927 {
928
929 #ifdef ENABLE_DEBUGGER
930         if (acpi_in_debugger) {
931                 u32 chars;
932
933                 kdb_read(buffer, sizeof(line_buf));
934
935                 /* remove the CR kdb includes */
936                 chars = strlen(buffer) - 1;
937                 buffer[chars] = '\0';
938         }
939 #endif
940
941         return 0;
942 }
943 #endif                          /*  ACPI_FUTURE_USAGE  */
944
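/*
 * Handle ACPICA signals: log AML Fatal opcodes, and treat AML
 * breakpoints as no-ops (see the comment in the breakpoint case).
 */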
945 acpi_status acpi_os_signal(u32 function, void *info)
946 {
947         switch (function) {
948         case ACPI_SIGNAL_FATAL:
949                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
950                 break;
951         case ACPI_SIGNAL_BREAKPOINT:
952                 /*
953                  * AML Breakpoint
954                  * ACPI spec. says to treat it as a NOP unless
955                  * you are debugging.  So if/when we integrate
956                  * AML debugger into the kernel debugger its
957                  * hook will go here.  But until then it is
958                  * not useful to print anything on breakpoints.
959                  */
960                 break;
961         default:
962                 break;
963         }
964
965         return AE_OK;
966 }
967
968 static int __init acpi_os_name_setup(char *str)
969 {
970         char *p = acpi_os_name;
971         int count = ACPI_MAX_OVERRIDE_LEN - 1;
972
973         if (!str || !*str)
974                 return 0;
975
976         for (; count-- && str && *str; str++) {
977                 if (isalnum(*str) || *str == ' ' || *str == ':')
978                         *p++ = *str;
979                 else if (*str == '\'' || *str == '"')
980                         continue;
981                 else
982                         break;
983         }
984         *p = 0;
985
986         return 1;
987
988 }
989
990 __setup("acpi_os_name=", acpi_os_name_setup);
991
992 static void __init set_osi_linux(unsigned int enable)
993 {
994         if (osi_linux.enable != enable) {
995                 osi_linux.enable = enable;
996                 printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
997                         enable ? "Add": "Delet");
998         }
999         return;
1000 }
1001
1002 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1003 {
1004         osi_linux.cmdline = 1;  /* cmdline set the default */
1005         set_osi_linux(enable);
1006
1007         return;
1008 }
1009
1010 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1011 {
1012         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1013
1014         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1015
1016         if (enable == -1)
1017                 return;
1018
1019         osi_linux.known = 1;    /* DMI knows which OSI(Linux) default needed */
1020
1021         set_osi_linux(enable);
1022
1023         return;
1024 }
1025
1026 /*
1027  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1028  *
1029  * empty string disables _OSI
1030  * string starting with '!' disables that string
1031  * otherwise string is added to list, augmenting built-in strings
1032  */
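/*
 * Illustrative boot parameters (the string "FooOS" below is a made-up
 * example): "acpi_osi=" disables the _OSI method entirely,
 * "acpi_osi=!Linux" keeps _OSI(Linux) answering FALSE, and
 * "acpi_osi=FooOS" adds one extra interface string (only a single
 * additional string is supported).
 */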
1033 int __init acpi_osi_setup(char *str)
1034 {
1035         if (str == NULL || *str == '\0') {
1036                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1037                 acpi_gbl_create_osi_method = FALSE;
1038         } else if (!strcmp("!Linux", str)) {
1039                 acpi_cmdline_osi_linux(0);      /* !enable */
1040         } else if (*str == '!') {
1041                 if (acpi_osi_invalidate(++str) == AE_OK)
1042                         printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1043         } else if (!strcmp("Linux", str)) {
1044                 acpi_cmdline_osi_linux(1);      /* enable */
1045         } else if (*osi_additional_string == '\0') {
1046                 strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1047                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1048         }
1049
1050         return 1;
1051 }
1052
1053 __setup("acpi_osi=", acpi_osi_setup);
1054
1055 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1056 static int __init acpi_serialize_setup(char *str)
1057 {
1058         printk(KERN_INFO PREFIX "serialize enabled\n");
1059
1060         acpi_gbl_all_methods_serialized = TRUE;
1061
1062         return 1;
1063 }
1064
1065 __setup("acpi_serialize", acpi_serialize_setup);
1066
1067 /*
1068  * Wake and Run-Time GPES are expected to be separate.
1069  * We disable wake-GPEs at run-time to prevent spurious
1070  * interrupts.
1071  *
1072  * However, if a system exists that shares Wake and
1073  * Run-time events on the same GPE this flag is available
1074  * to tell Linux to keep the wake-time GPEs enabled at run-time.
1075  */
1076 static int __init acpi_wake_gpes_always_on_setup(char *str)
1077 {
1078         printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
1079
1080         acpi_gbl_leave_wake_gpes_disabled = FALSE;
1081
1082         return 1;
1083 }
1084
1085 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1086
1087 /* Check for resource interference between native drivers and ACPI
1088  * OperationRegions (SystemIO and SystemMemory only).
1089  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1090  * in arbitrary AML code and can interfere with legacy drivers.
1091  * acpi_enforce_resources= can be set to:
1092  *
1093  *   - strict (default) (2)
1094  *     -> a driver that later tries to access the resources will not load
1095  *   - lax              (1)
1096  *     -> a driver that later tries to access the resources will load, but
1097  *        you get a system message warning that something might go wrong
1098  *
1099  *   - no               (0)
1100  *     -> ACPI OperationRegion resources will not be registered
1101  *
1102  */
1103 #define ENFORCE_RESOURCES_STRICT 2
1104 #define ENFORCE_RESOURCES_LAX    1
1105 #define ENFORCE_RESOURCES_NO     0
1106
1107 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1108
1109 static int __init acpi_enforce_resources_setup(char *str)
1110 {
1111         if (str == NULL || *str == '\0')
1112                 return 0;
1113
1114         if (!strcmp("strict", str))
1115                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1116         else if (!strcmp("lax", str))
1117                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1118         else if (!strcmp("no", str))
1119                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1120
1121         return 1;
1122 }
1123
1124 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1125
1126 /* Check for resource conflicts between ACPI OperationRegions and native
1127  * drivers */
1128 int acpi_check_resource_conflict(const struct resource *res)
1129 {
1130         struct acpi_res_list *res_list_elem;
1131         int ioport;
1132         int clash = 0;
1133
1134         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1135                 return 0;
1136         if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1137                 return 0;
1138
1139         ioport = res->flags & IORESOURCE_IO;
1140
1141         spin_lock(&acpi_res_lock);
1142         list_for_each_entry(res_list_elem, &resource_list_head,
1143                             resource_list) {
1144                 if (ioport && (res_list_elem->resource_type
1145                                != ACPI_ADR_SPACE_SYSTEM_IO))
1146                         continue;
1147                 if (!ioport && (res_list_elem->resource_type
1148                                 != ACPI_ADR_SPACE_SYSTEM_MEMORY))
1149                         continue;
1150
1151                 if (res->end < res_list_elem->start
1152                     || res_list_elem->end < res->start)
1153                         continue;
1154                 clash = 1;
1155                 break;
1156         }
1157         spin_unlock(&acpi_res_lock);
1158
1159         if (clash) {
1160                 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1161                         printk(KERN_WARNING "ACPI: resource %s %pR"
1162                                " conflicts with ACPI region %s %pR\n",
1163                                res->name, res, res_list_elem->name,
1164                                res_list_elem);
1165                         if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1166                                 printk(KERN_NOTICE "ACPI: This conflict may"
1167                                        " cause random problems and system"
1168                                        " instability\n");
1169                         printk(KERN_INFO "ACPI: If an ACPI driver is available"
1170                                " for this device, you should use it instead of"
1171                                " the native driver\n");
1172                 }
1173                 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1174                         return -EBUSY;
1175         }
1176         return 0;
1177 }
1178 EXPORT_SYMBOL(acpi_check_resource_conflict);
1179
1180 int acpi_check_region(resource_size_t start, resource_size_t n,
1181                       const char *name)
1182 {
1183         struct resource res = {
1184                 .start = start,
1185                 .end   = start + n - 1,
1186                 .name  = name,
1187                 .flags = IORESOURCE_IO,
1188         };
1189
1190         return acpi_check_resource_conflict(&res);
1191 }
1192 EXPORT_SYMBOL(acpi_check_region);
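/*
 * Illustrative (hypothetical) use by a native driver: check for an
 * ACPI OperationRegion conflict before claiming a legacy port range.
 * FOO_IO_BASE, FOO_IO_LEN and the "foo" name are placeholders.
 *
 *	if (acpi_check_region(FOO_IO_BASE, FOO_IO_LEN, "foo"))
 *		return -EBUSY;
 *	if (!request_region(FOO_IO_BASE, FOO_IO_LEN, "foo"))
 *		return -EBUSY;
 */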
1193
1194 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1195                       const char *name)
1196 {
1197         struct resource res = {
1198                 .start = start,
1199                 .end   = start + n - 1,
1200                 .name  = name,
1201                 .flags = IORESOURCE_MEM,
1202         };
1203
1204         return acpi_check_resource_conflict(&res);
1205
1206 }
1207 EXPORT_SYMBOL(acpi_check_mem_region);
1208
1209 /*
1210  * Acquire a spinlock.
1211  *
1212  * handle is a pointer to the spinlock_t.
1213  */
1214
1215 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1216 {
1217         acpi_cpu_flags flags;
1218         spin_lock_irqsave(lockp, flags);
1219         return flags;
1220 }
1221
1222 /*
1223  * Release a spinlock. See above.
1224  */
1225
1226 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1227 {
1228         spin_unlock_irqrestore(lockp, flags);
1229 }
1230
1231 #ifndef ACPI_USE_LOCAL_CACHE
1232
1233 /*******************************************************************************
1234  *
1235  * FUNCTION:    acpi_os_create_cache
1236  *
1237  * PARAMETERS:  name      - Ascii name for the cache
1238  *              size      - Size of each cached object
1239  *              depth     - Maximum depth of the cache (in objects) <ignored>
1240  *              cache     - Where the new cache object is returned
1241  *
1242  * RETURN:      status
1243  *
1244  * DESCRIPTION: Create a cache object
1245  *
1246  ******************************************************************************/
1247
1248 acpi_status
1249 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1250 {
1251         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1252         if (*cache == NULL)
1253                 return AE_ERROR;
1254         else
1255                 return AE_OK;
1256 }
1257
1258 /*******************************************************************************
1259  *
1260  * FUNCTION:    acpi_os_purge_cache
1261  *
1262  * PARAMETERS:  Cache           - Handle to cache object
1263  *
1264  * RETURN:      Status
1265  *
1266  * DESCRIPTION: Free all objects within the requested cache.
1267  *
1268  ******************************************************************************/
1269
1270 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1271 {
1272         kmem_cache_shrink(cache);
1273         return (AE_OK);
1274 }
1275
1276 /*******************************************************************************
1277  *
1278  * FUNCTION:    acpi_os_delete_cache
1279  *
1280  * PARAMETERS:  Cache           - Handle to cache object
1281  *
1282  * RETURN:      Status
1283  *
1284  * DESCRIPTION: Free all objects within the requested cache and delete the
1285  *              cache object.
1286  *
1287  ******************************************************************************/
1288
1289 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1290 {
1291         kmem_cache_destroy(cache);
1292         return (AE_OK);
1293 }
1294
1295 /*******************************************************************************
1296  *
1297  * FUNCTION:    acpi_os_release_object
1298  *
1299  * PARAMETERS:  Cache       - Handle to cache object
1300  *              Object      - The object to be released
1301  *
1302  * RETURN:      None
1303  *
1304  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1305  *              the object is deleted.
1306  *
1307  ******************************************************************************/
1308
1309 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1310 {
1311         kmem_cache_free(cache, object);
1312         return (AE_OK);
1313 }
1314
1315 /******************************************************************************
1316  *
1317  * FUNCTION:    acpi_os_validate_interface
1318  *
1319  * PARAMETERS:  interface           - Requested interface to be validated
1320  *
1321  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1322  *
1323  * DESCRIPTION: Match an interface string to the interfaces supported by the
1324  *              host. Strings originate from an AML call to the _OSI method.
1325  *
1326  *****************************************************************************/
1327
1328 acpi_status
1329 acpi_os_validate_interface (char *interface)
1330 {
1331         if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1332                 return AE_OK;
1333         if (!strcmp("Linux", interface)) {
1334
1335                 printk(KERN_NOTICE PREFIX
1336                         "BIOS _OSI(Linux) query %s%s\n",
1337                         osi_linux.enable ? "honored" : "ignored",
1338                         osi_linux.cmdline ? " via cmdline" :
1339                         osi_linux.dmi ? " via DMI" : "");
1340
1341                 if (osi_linux.enable)
1342                         return AE_OK;
1343         }
1344         return AE_SUPPORT;
1345 }
1346
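/*
 * Track declared OperationRegion address ranges: acpi_res_list_add()
 * refcounts duplicate ranges, and acpi_res_list_del() drops a reference
 * and frees the entry once the count reaches zero.  Callers hold
 * acpi_res_lock.
 */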
1347 static inline int acpi_res_list_add(struct acpi_res_list *res)
1348 {
1349         struct acpi_res_list *res_list_elem;
1350
1351         list_for_each_entry(res_list_elem, &resource_list_head,
1352                             resource_list) {
1353
1354                 if (res->resource_type == res_list_elem->resource_type &&
1355                     res->start == res_list_elem->start &&
1356                     res->end == res_list_elem->end) {
1357
1358                         /*
1359                          * The Region(addr,len) already exist in the list,
1360                          * just increase the count
1361                          */
1362
1363                         res_list_elem->count++;
1364                         return 0;
1365                 }
1366         }
1367
1368         res->count = 1;
1369         list_add(&res->resource_list, &resource_list_head);
1370         return 1;
1371 }
1372
1373 static inline void acpi_res_list_del(struct acpi_res_list *res)
1374 {
1375         struct acpi_res_list *res_list_elem;
1376
1377         list_for_each_entry(res_list_elem, &resource_list_head,
1378                             resource_list) {
1379
1380                 if (res->resource_type == res_list_elem->resource_type &&
1381                     res->start == res_list_elem->start &&
1382                     res->end == res_list_elem->end) {
1383
1384                         /*
1385                          * If the res count is decreased to 0,
1386                          * remove and free it
1387                          */
1388
1389                         if (--res_list_elem->count == 0) {
1390                                 list_del(&res_list_elem->resource_list);
1391                                 kfree(res_list_elem);
1392                         }
1393                         return;
1394                 }
1395         }
1396 }
1397
1398 acpi_status
1399 acpi_os_invalidate_address(
1400     u8                   space_id,
1401     acpi_physical_address   address,
1402     acpi_size               length)
1403 {
1404         struct acpi_res_list res;
1405
1406         switch (space_id) {
1407         case ACPI_ADR_SPACE_SYSTEM_IO:
1408         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1409                 /* Only interference checks against SystemIO and SystemMemory
1410                    are needed */
1411                 res.start = address;
1412                 res.end = address + length - 1;
1413                 res.resource_type = space_id;
1414                 spin_lock(&acpi_res_lock);
1415                 acpi_res_list_del(&res);
1416                 spin_unlock(&acpi_res_lock);
1417                 break;
1418         case ACPI_ADR_SPACE_PCI_CONFIG:
1419         case ACPI_ADR_SPACE_EC:
1420         case ACPI_ADR_SPACE_SMBUS:
1421         case ACPI_ADR_SPACE_CMOS:
1422         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1423         case ACPI_ADR_SPACE_DATA_TABLE:
1424         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1425                 break;
1426         }
1427         return AE_OK;
1428 }
1429
1430 /******************************************************************************
1431  *
1432  * FUNCTION:    acpi_os_validate_address
1433  *
1434  * PARAMETERS:  space_id             - ACPI space ID
1435  *              address             - Physical address
1436  *              length              - Address length
1437  *
1438  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1439  *              should return AE_AML_ILLEGAL_ADDRESS.
1440  *
1441  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1442  *              the addresses accessed by AML operation regions.
1443  *
1444  *****************************************************************************/
1445
1446 acpi_status
1447 acpi_os_validate_address (
1448     u8                   space_id,
1449     acpi_physical_address   address,
1450     acpi_size               length,
1451     char *name)
1452 {
1453         struct acpi_res_list *res;
1454         int added;
1455         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1456                 return AE_OK;
1457
1458         switch (space_id) {
1459         case ACPI_ADR_SPACE_SYSTEM_IO:
1460         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1461                 /* Only interference checks against SystemIO and SystemMemory
1462                    are needed */
1463                 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1464                 if (!res)
1465                         return AE_OK;
1466                 /* ACPI names are fixed at 4 bytes; use strlcpy to be safe */
1467                 strlcpy(res->name, name, 5);
1468                 res->start = address;
1469                 res->end = address + length - 1;
1470                 res->resource_type = space_id;
1471                 spin_lock(&acpi_res_lock);
1472                 added = acpi_res_list_add(res);
1473                 spin_unlock(&acpi_res_lock);
1474                 pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
1475                          "name: %s\n", added ? "Added" : "Already existing",
1476                          (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1477                          ? "SystemIO" : "System Memory",
1478                          (unsigned long long)res->start,
1479                          (unsigned long long)res->end,
1480                          res->name);
1481                 if (!added)
1482                         kfree(res);
1483                 break;
1484         case ACPI_ADR_SPACE_PCI_CONFIG:
1485         case ACPI_ADR_SPACE_EC:
1486         case ACPI_ADR_SPACE_SMBUS:
1487         case ACPI_ADR_SPACE_CMOS:
1488         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1489         case ACPI_ADR_SPACE_DATA_TABLE:
1490         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1491                 break;
1492         }
1493         return AE_OK;
1494 }
1495
1496 #endif