]> git.karo-electronics.de Git - mv-sheeva.git/blob - drivers/acpi/osl.c
c5b4f1ed9b71f4fb821232e547b48392f35e39c8
[mv-sheeva.git] / drivers / acpi / osl.c
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; if not, write to the Free Software
24  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *
28  */
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/mm.h>
34 #include <linux/pci.h>
35 #include <linux/interrupt.h>
36 #include <linux/kmod.h>
37 #include <linux/delay.h>
38 #include <linux/workqueue.h>
39 #include <linux/nmi.h>
40 #include <linux/acpi.h>
41 #include <linux/efi.h>
42 #include <linux/ioport.h>
43 #include <linux/list.h>
44 #include <linux/jiffies.h>
45 #include <linux/semaphore.h>
46
47 #include <asm/io.h>
48 #include <asm/uaccess.h>
49
50 #include <acpi/acpi.h>
51 #include <acpi/acpi_bus.h>
52 #include <acpi/processor.h>
53
54 #define _COMPONENT              ACPI_OS_SERVICES
55 ACPI_MODULE_NAME("osl");
56 #define PREFIX          "ACPI: "
/*
 * Deferred procedure call: bundles an OSPM callback with its argument so
 * it can travel through a workqueue.  Allocated in __acpi_os_execute()
 * and freed by the work handler that runs it.
 */
struct acpi_os_dpc {
        acpi_osd_exec_callback function;        /* callback to invoke */
        void *context;                          /* opaque argument for ->function */
        struct work_struct work;                /* embedded workqueue item */
};
62
63 #ifdef CONFIG_ACPI_CUSTOM_DSDT
64 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
65 #endif
66
67 #ifdef ENABLE_DEBUGGER
68 #include <linux/kdb.h>
69
70 /* stuff for debugger support */
71 int acpi_in_debugger;
72 EXPORT_SYMBOL(acpi_in_debugger);
73
74 extern char line_buf[80];
75 #endif                          /*ENABLE_DEBUGGER */
76
77 static unsigned int acpi_irq_irq;
78 static acpi_osd_handler acpi_irq_handler;
79 static void *acpi_irq_context;
80 static struct workqueue_struct *kacpid_wq;
81 static struct workqueue_struct *kacpi_notify_wq;
82 static struct workqueue_struct *kacpi_hotplug_wq;
83
/*
 * Tracks an address-space region in use by ACPI, kept on
 * resource_list_head under acpi_res_lock.
 */
struct acpi_res_list {
        resource_size_t start;
        resource_size_t end;
        acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
        char name[5];   /* only can have a length of 4 chars, make use of this
                           one instead of res->name, no need to kalloc then */
        struct list_head resource_list;
        int count;      /* reference count for overlapping registrations */
};
93
94 static LIST_HEAD(resource_list_head);
95 static DEFINE_SPINLOCK(acpi_res_lock);
96
97 #define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
98 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
99
100 /*
101  * The story of _OSI(Linux)
102  *
103  * From pre-history through Linux-2.6.22,
104  * Linux responded TRUE upon a BIOS OSI(Linux) query.
105  *
106  * Unfortunately, reference BIOS writers got wind of this
107  * and put OSI(Linux) in their example code, quickly exposing
108  * this string as ill-conceived and opening the door to
109  * an un-bounded number of BIOS incompatibilities.
110  *
111  * For example, OSI(Linux) was used on resume to re-POST a
112  * video card on one system, because Linux at that time
113  * could not do a speedy restore in its native driver.
114  * But then upon gaining quick native restore capability,
115  * Linux has no way to tell the BIOS to skip the time-consuming
116  * POST -- putting Linux at a permanent performance disadvantage.
117  * On another system, the BIOS writer used OSI(Linux)
118  * to infer native OS support for IPMI!  On other systems,
119  * OSI(Linux) simply got in the way of Linux claiming to
120  * be compatible with other operating systems, exposing
121  * BIOS issues such as skipped device initialization.
122  *
 * So "Linux" turned out to be a really poor choice of
124  * OSI string, and from Linux-2.6.23 onward we respond FALSE.
125  *
126  * BIOS writers should NOT query _OSI(Linux) on future systems.
127  * Linux will complain on the console when it sees it, and return FALSE.
128  * To get Linux to return TRUE for your system  will require
129  * a kernel source update to add a DMI entry,
130  * or boot with "acpi_osi=Linux"
131  */
132
/* State of the _OSI(Linux) answer and where it came from (see the
 * narrative comment above). */
static struct osi_linux {
        unsigned int    enable:1;       /* respond TRUE to _OSI(Linux)? */
        unsigned int    dmi:1;          /* set via DMI quirk table */
        unsigned int    cmdline:1;      /* set via acpi_osi= boot param */
        unsigned int    known:1;        /* a DMI entry matched this box */
} osi_linux = { 0, 0, 0, 0};
139
140 static void __init acpi_request_region (struct acpi_generic_address *addr,
141         unsigned int length, char *desc)
142 {
143         struct resource *res;
144
145         if (!addr->address || !length)
146                 return;
147
148         if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
149                 res = request_region(addr->address, length, desc);
150         else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
151                 res = request_mem_region(addr->address, length, desc);
152 }
153
/*
 * Reserve the fixed-hardware register blocks described by the FADT
 * (PM1a/b event and control, PM timer, PM2 control, GPE0/1) so other
 * drivers cannot claim them.  Always returns 0; it runs as an initcall.
 */
static int __init acpi_reserve_resources(void)
{
        acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1a_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1b_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1a_CNT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1b_CNT_BLK");

        /* Per ACPI spec the PM timer is only valid when 4 bytes long */
        if (acpi_gbl_FADT.pm_timer_length == 4)
                acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

        acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
                "ACPI PM2_CNT_BLK");

        /* Length of GPE blocks must be a non-negative multiple of 2 */

        if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
                               acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

        if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
                               acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

        return 0;
}
186 device_initcall(acpi_reserve_resources);
187
/* OSL early-init hook required by ACPICA; nothing to do at this stage. */
acpi_status __init acpi_os_initialize(void)
{
        return AE_OK;
}
192
/*
 * Work handler that pins the executing workqueue thread to CPU 0 and
 * then frees the one-shot work item allocated by bind_workqueue().
 */
static void bind_to_cpu0(struct work_struct *work)
{
        set_cpus_allowed(current, cpumask_of_cpu(0));
        kfree(work);
}
198
199 static void bind_workqueue(struct workqueue_struct *wq)
200 {
201         struct work_struct *work;
202
203         work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
204         INIT_WORK(work, bind_to_cpu0);
205         queue_work(wq, work);
206 }
207
208 acpi_status acpi_os_initialize1(void)
209 {
210         /*
211          * On some machines, a software-initiated SMI causes corruption unless
212          * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
213          * typically it's done in GPE-related methods that are run via
214          * workqueues, so we can avoid the known corruption cases by binding
215          * the workqueues to CPU 0.
216          */
217         kacpid_wq = create_singlethread_workqueue("kacpid");
218         bind_workqueue(kacpid_wq);
219         kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
220         bind_workqueue(kacpi_notify_wq);
221         kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
222         bind_workqueue(kacpi_hotplug_wq);
223         BUG_ON(!kacpid_wq);
224         BUG_ON(!kacpi_notify_wq);
225         BUG_ON(!kacpi_hotplug_wq);
226         return AE_OK;
227 }
228
/*
 * OSL teardown: release the SCI interrupt handler (if installed) and
 * destroy the three ACPI workqueues created in acpi_os_initialize1().
 */
acpi_status acpi_os_terminate(void)
{
        if (acpi_irq_handler) {
                acpi_os_remove_interrupt_handler(acpi_irq_irq,
                                                 acpi_irq_handler);
        }

        destroy_workqueue(kacpid_wq);
        destroy_workqueue(kacpi_notify_wq);
        destroy_workqueue(kacpi_hotplug_wq);

        return AE_OK;
}
242
/* printf-style output for ACPICA; forwards to acpi_os_vprintf(). */
void acpi_os_printf(const char *fmt, ...)
{
        va_list args;
        va_start(args, fmt);
        acpi_os_vprintf(fmt, args);
        va_end(args);
}
250
251 void acpi_os_vprintf(const char *fmt, va_list args)
252 {
253         static char buffer[512];
254
255         vsprintf(buffer, fmt, args);
256
257 #ifdef ENABLE_DEBUGGER
258         if (acpi_in_debugger) {
259                 kdb_printf("%s", buffer);
260         } else {
261                 printk(KERN_CONT "%s", buffer);
262         }
263 #else
264         printk(KERN_CONT "%s", buffer);
265 #endif
266 }
267
/*
 * Locate the RSDP: on EFI systems take it from the EFI system table
 * (preferring the ACPI 2.0 pointer), otherwise scan legacy BIOS memory
 * via acpi_find_root_pointer().  Returns 0 when no tables are found.
 */
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
        if (efi_enabled) {
                if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi20;
                else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi;
                else {
                        printk(KERN_ERR PREFIX
                               "System description tables not found\n");
                        return 0;
                }
        } else {
                acpi_physical_address pa = 0;

                acpi_find_root_pointer(&pa);
                return pa;
        }
}
287
/*
 * Map a physical ACPI table region.  After boot (acpi_gbl_permanent_mmap
 * set) this is a plain ioremap(); before that, the early fixmap-based
 * __acpi_map_table() is used.  Returns NULL if the address does not fit
 * in an unsigned long (cannot be ioremapped on this arch).
 */
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
        if (phys > ULONG_MAX) {
                printk(KERN_ERR PREFIX "Cannot map memory that high\n");
                return NULL;
        }
        if (acpi_gbl_permanent_mmap)
                /*
                * ioremap checks to ensure this is in reserved space
                */
                return ioremap((unsigned long)phys, size);
        else
                return __acpi_map_table((unsigned long)phys, size);
}
303 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
304
/* Undo acpi_os_map_memory(): iounmap after boot, early-table unmap before. */
void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
        if (acpi_gbl_permanent_mmap)
                iounmap(virt);
        else
                __acpi_unmap_table(virt, size);
}
312 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
313
/*
 * Boot-time-only unmap helper: releases an early fixmap mapping, but is
 * a no-op once permanent mmap is in effect (the mapping is then owned
 * by ioremap/iounmap pairs instead).
 */
void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
        if (!acpi_gbl_permanent_mmap)
                __acpi_unmap_table(virt, size);
}
319
320 #ifdef ACPI_FUTURE_USAGE
/*
 * Translate a kernel virtual address to physical for ACPICA.  Only valid
 * for directly-mapped (lowmem) addresses, since it uses virt_to_phys().
 */
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
        if (!phys || !virt)
                return AE_BAD_PARAMETER;

        *phys = virt_to_phys(virt);

        return AE_OK;
}
331 #endif
332
333 #define ACPI_MAX_OVERRIDE_LEN 100
334
335 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
336
/*
 * Let the OS override predefined ACPI namespace objects.  Only _OS_ is
 * overridden here, and only when the user supplied acpi_os_name= on the
 * command line; *new_val stays NULL (no override) otherwise.
 */
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
                            acpi_string * new_val)
{
        if (!init_val || !new_val)
                return AE_BAD_PARAMETER;

        *new_val = NULL;
        if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
                printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
                       acpi_os_name);
                *new_val = acpi_os_name;
        }

        return AE_OK;
}
353
/*
 * Offer a replacement for an ACPI table.  Only the DSDT can be replaced,
 * and only when a custom DSDT was compiled in (CONFIG_ACPI_CUSTOM_DSDT);
 * doing so taints the kernel.  *new_table is NULL when no override.
 */
acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
                       struct acpi_table_header ** new_table)
{
        if (!existing_table || !new_table)
                return AE_BAD_PARAMETER;

        *new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
        if (strncmp(existing_table->signature, "DSDT", 4) == 0)
                *new_table = (struct acpi_table_header *)AmlCode;
#endif
        if (*new_table != NULL) {
                printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
                           "this is unsafe: tainting kernel\n",
                       existing_table->signature,
                       existing_table->oem_table_id);
                add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
        }
        return AE_OK;
}
376
377 static irqreturn_t acpi_irq(int irq, void *dev_id)
378 {
379         u32 handled;
380
381         handled = (*acpi_irq_handler) (acpi_irq_context);
382
383         if (handled) {
384                 acpi_irq_handled++;
385                 return IRQ_HANDLED;
386         } else {
387                 acpi_irq_not_handled++;
388                 return IRQ_NONE;
389         }
390 }
391
392 acpi_status
393 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
394                                   void *context)
395 {
396         unsigned int irq;
397
398         acpi_irq_stats_init();
399
400         /*
401          * Ignore the GSI from the core, and use the value in our copy of the
402          * FADT. It may not be the same if an interrupt source override exists
403          * for the SCI.
404          */
405         gsi = acpi_gbl_FADT.sci_interrupt;
406         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
407                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
408                        gsi);
409                 return AE_OK;
410         }
411
412         acpi_irq_handler = handler;
413         acpi_irq_context = context;
414         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
415                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
416                 return AE_NOT_ACQUIRED;
417         }
418         acpi_irq_irq = irq;
419
420         return AE_OK;
421 }
422
/*
 * Remove the SCI handler installed above.  IRQ 0 is treated as "nothing
 * installed"; the dev_id passed to free_irq() must match request_irq()'s
 * (the acpi_irq function pointer doubles as the cookie).
 */
acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
        if (irq) {
                free_irq(irq, acpi_irq);
                acpi_irq_handler = NULL;
                acpi_irq_irq = 0;
        }

        return AE_OK;
}
433
434 /*
435  * Running in interpreter thread context, safe to sleep
436  */
437
438 void acpi_os_sleep(acpi_integer ms)
439 {
440         schedule_timeout_interruptible(msecs_to_jiffies(ms));
441 }
442
/*
 * Busy-wait for @us microseconds (AML Stall operator).  Delays are done
 * in <=1ms udelay() chunks, petting the NMI watchdog between chunks so
 * a long stall does not trigger a lockup report.
 */
void acpi_os_stall(u32 us)
{
        while (us) {
                u32 delay = 1000;

                if (delay > us)
                        delay = us;
                udelay(delay);
                touch_nmi_watchdog();
                us -= delay;
        }
}
455
456 /*
457  * Support ACPI 3.0 AML Timer operand
458  * Returns 64-bit free-running, monotonically increasing timer
459  * with 100ns granularity
460  */
/*
 * Stub implementation: simply returns a monotonically increasing
 * counter (incremented per call), NOT a real 100ns-granularity clock.
 * The one-time error message (printed while t is still 0) flags this.
 * Note the static counter is not atomic, so concurrent callers could
 * race — acceptable for a placeholder.
 */
u64 acpi_os_get_timer(void)
{
        static u64 t;

#ifdef  CONFIG_HPET
        /* TBD: use HPET if available */
#endif

#ifdef  CONFIG_X86_PM_TIMER
        /* TBD: default to PM timer if HPET was not available */
#endif
        if (!t)
                printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");

        return ++t;
}
477
/*
 * Read @width bits (8/16/32) from an I/O port.  A NULL @value is
 * tolerated by reading into a dummy (the access may have side effects
 * the caller wants even without the data).  Widths above 32 are a bug.
 */
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
        u32 dummy;

        if (!value)
                value = &dummy;

        *value = 0;     /* zero upper bytes for narrow reads */
        if (width <= 8) {
                *(u8 *) value = inb(port);
        } else if (width <= 16) {
                *(u16 *) value = inw(port);
        } else if (width <= 32) {
                *(u32 *) value = inl(port);
        } else {
                BUG();
        }

        return AE_OK;
}
498
499 EXPORT_SYMBOL(acpi_os_read_port);
500
/*
 * Write @width bits (8/16/32) of @value to an I/O port; wider widths
 * are a bug.  Narrow writes simply truncate @value.
 */
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
        if (width <= 8) {
                outb(value, port);
        } else if (width <= 16) {
                outw(value, port);
        } else if (width <= 32) {
                outl(value, port);
        } else {
                BUG();
        }

        return AE_OK;
}
515
516 EXPORT_SYMBOL(acpi_os_write_port);
517
518 acpi_status
519 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
520 {
521         u32 dummy;
522         void __iomem *virt_addr;
523
524         virt_addr = ioremap(phys_addr, width);
525         if (!value)
526                 value = &dummy;
527
528         switch (width) {
529         case 8:
530                 *(u8 *) value = readb(virt_addr);
531                 break;
532         case 16:
533                 *(u16 *) value = readw(virt_addr);
534                 break;
535         case 32:
536                 *(u32 *) value = readl(virt_addr);
537                 break;
538         default:
539                 BUG();
540         }
541
542         iounmap(virt_addr);
543
544         return AE_OK;
545 }
546
547 acpi_status
548 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
549 {
550         void __iomem *virt_addr;
551
552         virt_addr = ioremap(phys_addr, width);
553
554         switch (width) {
555         case 8:
556                 writeb(value, virt_addr);
557                 break;
558         case 16:
559                 writew(value, virt_addr);
560                 break;
561         case 32:
562                 writel(value, virt_addr);
563                 break;
564         default:
565                 BUG();
566         }
567
568         iounmap(virt_addr);
569
570         return AE_OK;
571 }
572
/*
 * Read @width bits (8/16/32) from PCI config space for the device
 * identified by @pci_id at offset @reg.  Unsupported widths and a NULL
 * @value yield AE_BAD_PARAMETER/AE_ERROR rather than BUG, since callers
 * pass firmware-derived widths here.
 */
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                               u32 *value, u32 width)
{
        int result, size;

        if (!value)
                return AE_BAD_PARAMETER;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        result = raw_pci_read(pci_id->segment, pci_id->bus,
                                PCI_DEVFN(pci_id->device, pci_id->function),
                                reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}
602
/*
 * Write @width bits (8/16/32) of @value to PCI config space at offset
 * @reg for the device identified by @pci_id.  Mirrors the read path's
 * width handling; unsupported widths return AE_ERROR.
 */
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                                acpi_integer value, u32 width)
{
        int result, size;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        result = raw_pci_write(pci_id->segment, pci_id->bus,
                                PCI_DEVFN(pci_id->device, pci_id->function),
                                reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}
629
630 /* TODO: Change code to take advantage of driver model more */
/*
 * Recursive helper for acpi_os_derive_pci_id(): walks up the namespace
 * from @chandle toward @rhandle (recursing first, so processing happens
 * root-to-leaf), filling in *id's device/function from each node's _ADR
 * and tracking bus numbers across PCI-to-PCI bridges by reading the
 * header type (0x0e) and primary/secondary bus registers (0x18/0x19).
 * Logic is order-sensitive; documented only, not restructured.
 */
static void acpi_os_derive_pci_id_2(acpi_handle rhandle,        /* upper bound  */
                                    acpi_handle chandle,        /* current node */
                                    struct acpi_pci_id **id,
                                    int *is_bridge, u8 * bus_number)
{
        acpi_handle handle;
        struct acpi_pci_id *pci_id = *id;
        acpi_status status;
        unsigned long long temp;
        acpi_object_type type;

        acpi_get_parent(chandle, &handle);
        if (handle != rhandle) {
                /* process ancestors first so bus numbers propagate downward */
                acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
                                        bus_number);

                status = acpi_get_type(handle, &type);
                if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
                        return;

                status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
                                          &temp);
                if (ACPI_SUCCESS(status)) {
                        u32 val;
                        /* _ADR encodes device in the high word, function low */
                        pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
                        pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));

                        if (*is_bridge)
                                pci_id->bus = *bus_number;

                        /* any nicer way to get bus number of bridge ? */
                        status =
                            acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
                                                           8);
                        /* header type 1 or 2 => PCI/CardBus bridge */
                        if (ACPI_SUCCESS(status)
                            && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
                                status =
                                    acpi_os_read_pci_configuration(pci_id, 0x18,
                                                                   &val, 8);
                                if (!ACPI_SUCCESS(status)) {
                                        /* Certainly broken...  FIX ME */
                                        return;
                                }
                                *is_bridge = 1;
                                pci_id->bus = val;
                                status =
                                    acpi_os_read_pci_configuration(pci_id, 0x19,
                                                                   &val, 8);
                                if (ACPI_SUCCESS(status)) {
                                        *bus_number = val;
                                }
                        } else
                                *is_bridge = 0;
                }
        }
}
687
/*
 * Derive the PCI seg/bus/dev/fn of the namespace node @chandle by
 * walking up to @rhandle.  Starts with is_bridge=1 and the caller's bus
 * so the first _ADR level inherits (*id)->bus; see the recursive helper.
 */
void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound  */
                           acpi_handle chandle, /* current node */
                           struct acpi_pci_id **id)
{
        int is_bridge = 1;
        u8 bus_number = (*id)->bus;

        acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}
697
698 static void acpi_os_execute_deferred(struct work_struct *work)
699 {
700         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
701         if (!dpc) {
702                 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
703                 return;
704         }
705
706         dpc->function(dpc->context);
707         kfree(dpc);
708
709         return;
710 }
711
712 static void acpi_os_execute_hp_deferred(struct work_struct *work)
713 {
714         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
715         if (!dpc) {
716                 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
717                 return;
718         }
719
720         acpi_os_wait_events_complete(NULL);
721
722         dpc->function(dpc->context);
723         kfree(dpc);
724
725         return;
726 }
727
728 /*******************************************************************************
729  *
730  * FUNCTION:    acpi_os_execute
731  *
732  * PARAMETERS:  Type               - Type of the callback
733  *              Function           - Function to be executed
734  *              Context            - Function parameters
735  *
736  * RETURN:      Status
737  *
738  * DESCRIPTION: Depending on type, either queues function for deferred execution or
739  *              immediately executes function on a separate thread.
740  *
741  ******************************************************************************/
742
/*
 * Common implementation for acpi_os_execute()/acpi_os_hotplug_execute():
 * packages @function/@context into a heap-allocated DPC and queues it on
 * the workqueue selected by @hp and @type.  GFP_ATOMIC is used because
 * this can be called from interrupt context (e.g. GPE dispatch).
 */
static acpi_status __acpi_os_execute(acpi_execute_type type,
        acpi_osd_exec_callback function, void *context, int hp)
{
        acpi_status status = AE_OK;
        struct acpi_os_dpc *dpc;
        struct workqueue_struct *queue;
        work_func_t func;
        int ret;
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "Scheduling function [%p(%p)] for deferred execution.\n",
                          function, context));

        if (!function)
                return AE_BAD_PARAMETER;

        /*
         * Allocate/initialize DPC structure.  Note that this memory will be
         * freed by the callee.  The kernel handles the work_struct list  in a
         * way that allows us to also free its memory inside the callee.
         * Because we may want to schedule several tasks with different
         * parameters we can't use the approach some kernel code uses of
         * having a static work_struct.
         */

        dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
        if (!dpc)
                return AE_NO_MEMORY;

        dpc->function = function;
        dpc->context = context;

        /*
         * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
         * because the hotplug code may call driver .remove() functions,
         * which invoke flush_scheduled_work/acpi_os_wait_events_complete
         * to flush these workqueues.
         */
        queue = hp ? kacpi_hotplug_wq :
                (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
        func = hp ? acpi_os_execute_hp_deferred : acpi_os_execute_deferred;
        INIT_WORK(&dpc->work, func);
        ret = queue_work(queue, &dpc->work);

        if (!ret) {
                /* work item was already pending — should not happen for a
                   freshly allocated DPC; report and free it ourselves */
                printk(KERN_ERR PREFIX
                          "Call to queue_work() failed.\n");
                status = AE_ERROR;
                kfree(dpc);
        }
        return status;
}
794
/* ACPICA entry point: queue @function for deferred (non-hotplug) execution. */
acpi_status acpi_os_execute(acpi_execute_type type,
                            acpi_osd_exec_callback function, void *context)
{
        return __acpi_os_execute(type, function, context, 0);
}
800 EXPORT_SYMBOL(acpi_os_execute);
801
/* Queue @function on the dedicated hotplug workqueue (type is unused there). */
acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
        void *context)
{
        return __acpi_os_execute(0, function, context, 1);
}
807
/*
 * Block until all queued ACPI GPE and notify work has run.  The hotplug
 * workqueue is deliberately NOT flushed — hotplug handlers call this
 * function themselves, and flushing it here would deadlock.
 */
void acpi_os_wait_events_complete(void *context)
{
        flush_workqueue(kacpid_wq);
        flush_workqueue(kacpi_notify_wq);
}
813
814 EXPORT_SYMBOL(acpi_os_wait_events_complete);
815
816 /*
817  * Allocate the memory for a spinlock and initialize it.
818  */
/*
 * Initialize the caller-provided spinlock.  NOTE(review): despite the
 * comment above, no memory is allocated here — *handle must already
 * point at spinlock storage supplied by ACPICA.
 */
acpi_status acpi_os_create_lock(acpi_spinlock * handle)
{
        spin_lock_init(*handle);

        return AE_OK;
}
825
826 /*
827  * Deallocate the memory for a spinlock.
828  */
/* Nothing to free: acpi_os_create_lock() allocated nothing. */
void acpi_os_delete_lock(acpi_spinlock handle)
{
        return;
}
833
834 acpi_status
835 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
836 {
837         struct semaphore *sem = NULL;
838
839         sem = acpi_os_allocate(sizeof(struct semaphore));
840         if (!sem)
841                 return AE_NO_MEMORY;
842         memset(sem, 0, sizeof(struct semaphore));
843
844         sema_init(sem, initial_units);
845
846         *handle = (acpi_handle *) sem;
847
848         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
849                           *handle, initial_units));
850
851         return AE_OK;
852 }
853
854 /*
855  * TODO: A better way to delete semaphores?  Linux doesn't have a
856  * 'delete_semaphore()' function -- may result in an invalid
857  * pointer dereference for non-synchronized consumers.  Should
858  * we at least check for blocked threads and signal/cancel them?
859  */
860
/*
 * Free a semaphore created above.  BUG if any thread is still waiting
 * on it (see the TODO comment above — there is no safe "delete" for a
 * contended semaphore).  The trailing sem = NULL only clears the local.
 */
acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
        struct semaphore *sem = (struct semaphore *)handle;

        if (!sem)
                return AE_BAD_PARAMETER;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

        BUG_ON(!list_empty(&sem->wait_list));
        kfree(sem);
        sem = NULL;

        return AE_OK;
}
876
877 /*
878  * TODO: Support for units > 1?
879  */
/*
 * Acquire @units (only 1 supported) from the semaphore, waiting at most
 * @timeout milliseconds; ACPI_WAIT_FOREVER means block indefinitely.
 * Returns AE_TIME when the timeout expires before the semaphore is
 * obtained.  May sleep — interpreter-thread context only.
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
        acpi_status status = AE_OK;
        struct semaphore *sem = (struct semaphore *)handle;
        long jiffies;
        int ret = 0;

        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
                          handle, units, timeout));

        if (timeout == ACPI_WAIT_FOREVER)
                jiffies = MAX_SCHEDULE_TIMEOUT;
        else
                jiffies = msecs_to_jiffies(timeout);

        /* down_timeout() returns -ETIME on expiry, 0 on acquisition */
        ret = down_timeout(sem, jiffies);
        if (ret)
                status = AE_TIME;

        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Failed to acquire semaphore[%p|%d|%d], %s",
                                  handle, units, timeout,
                                  acpi_format_exception(status)));
        } else {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Acquired semaphore[%p|%d|%d]", handle,
                                  units, timeout));
        }

        return status;
}
918
919 /*
920  * TODO: Support for units > 1?
921  */
/*
 * Release @units (only 1 supported) back to the semaphore.  Safe from
 * any context; up() never sleeps.
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
        struct semaphore *sem = (struct semaphore *)handle;

        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
                          units));

        up(sem);

        return AE_OK;
}
939
940 #ifdef ACPI_FUTURE_USAGE
/*
 * Read a line of debugger input into @buffer via kdb (only when the
 * in-kernel debugger is active) and strip the trailing CR.  NOTE(review):
 * the read is bounded by sizeof(line_buf), not by @buffer's size —
 * caller must supply at least that much space.  Always returns 0.
 */
u32 acpi_os_get_line(char *buffer)
{

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                u32 chars;

                kdb_read(buffer, sizeof(line_buf));

                /* remove the CR kdb includes */
                chars = strlen(buffer) - 1;
                buffer[chars] = '\0';
        }
#endif

        return 0;
}
958 #endif                          /*  ACPI_FUTURE_USAGE  */
959
960 acpi_status acpi_os_signal(u32 function, void *info)
961 {
962         switch (function) {
963         case ACPI_SIGNAL_FATAL:
964                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
965                 break;
966         case ACPI_SIGNAL_BREAKPOINT:
967                 /*
968                  * AML Breakpoint
969                  * ACPI spec. says to treat it as a NOP unless
970                  * you are debugging.  So if/when we integrate
971                  * AML debugger into the kernel debugger its
972                  * hook will go here.  But until then it is
973                  * not useful to print anything on breakpoints.
974                  */
975                 break;
976         default:
977                 break;
978         }
979
980         return AE_OK;
981 }
982
983 static int __init acpi_os_name_setup(char *str)
984 {
985         char *p = acpi_os_name;
986         int count = ACPI_MAX_OVERRIDE_LEN - 1;
987
988         if (!str || !*str)
989                 return 0;
990
991         for (; count-- && str && *str; str++) {
992                 if (isalnum(*str) || *str == ' ' || *str == ':')
993                         *p++ = *str;
994                 else if (*str == '\'' || *str == '"')
995                         continue;
996                 else
997                         break;
998         }
999         *p = 0;
1000
1001         return 1;
1002
1003 }
1004
1005 __setup("acpi_os_name=", acpi_os_name_setup);
1006
1007 static void __init set_osi_linux(unsigned int enable)
1008 {
1009         if (osi_linux.enable != enable) {
1010                 osi_linux.enable = enable;
1011                 printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
1012                         enable ? "Add": "Delet");
1013         }
1014         return;
1015 }
1016
1017 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1018 {
1019         osi_linux.cmdline = 1;  /* cmdline set the default */
1020         set_osi_linux(enable);
1021
1022         return;
1023 }
1024
1025 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1026 {
1027         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1028
1029         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1030
1031         if (enable == -1)
1032                 return;
1033
1034         osi_linux.known = 1;    /* DMI knows which OSI(Linux) default needed */
1035
1036         set_osi_linux(enable);
1037
1038         return;
1039 }
1040
1041 /*
1042  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1043  *
1044  * empty string disables _OSI
1045  * string starting with '!' disables that string
1046  * otherwise string is added to list, augmenting built-in strings
1047  */
1048 int __init acpi_osi_setup(char *str)
1049 {
1050         if (str == NULL || *str == '\0') {
1051                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1052                 acpi_gbl_create_osi_method = FALSE;
1053         } else if (!strcmp("!Linux", str)) {
1054                 acpi_cmdline_osi_linux(0);      /* !enable */
1055         } else if (*str == '!') {
1056                 if (acpi_osi_invalidate(++str) == AE_OK)
1057                         printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1058         } else if (!strcmp("Linux", str)) {
1059                 acpi_cmdline_osi_linux(1);      /* enable */
1060         } else if (*osi_additional_string == '\0') {
1061                 strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1062                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1063         }
1064
1065         return 1;
1066 }
1067
1068 __setup("acpi_osi=", acpi_osi_setup);
1069
1070 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1071 static int __init acpi_serialize_setup(char *str)
1072 {
1073         printk(KERN_INFO PREFIX "serialize enabled\n");
1074
1075         acpi_gbl_all_methods_serialized = TRUE;
1076
1077         return 1;
1078 }
1079
1080 __setup("acpi_serialize", acpi_serialize_setup);
1081
1082 /*
1083  * Wake and Run-Time GPES are expected to be separate.
1084  * We disable wake-GPEs at run-time to prevent spurious
1085  * interrupts.
1086  *
1087  * However, if a system exists that shares Wake and
1088  * Run-time events on the same GPE this flag is available
1089  * to tell Linux to keep the wake-time GPEs enabled at run-time.
1090  */
1091 static int __init acpi_wake_gpes_always_on_setup(char *str)
1092 {
1093         printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
1094
1095         acpi_gbl_leave_wake_gpes_disabled = FALSE;
1096
1097         return 1;
1098 }
1099
1100 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1101
1102 /* Check of resource interference between native drivers and ACPI
1103  * OperationRegions (SystemIO and System Memory only).
1104  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1105  * in arbitrary AML code and can interfere with legacy drivers.
1106  * acpi_enforce_resources= can be set to:
1107  *
1108  *   - strict (default) (2)
1109  *     -> further driver trying to access the resources will not load
1110  *   - lax              (1)
1111  *     -> further driver trying to access the resources will load, but you
1112  *     get a system message that something might go wrong...
1113  *
1114  *   - no               (0)
1115  *     -> ACPI Operation Region resources will not be registered
1116  *
1117  */
1118 #define ENFORCE_RESOURCES_STRICT 2
1119 #define ENFORCE_RESOURCES_LAX    1
1120 #define ENFORCE_RESOURCES_NO     0
1121
1122 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1123
1124 static int __init acpi_enforce_resources_setup(char *str)
1125 {
1126         if (str == NULL || *str == '\0')
1127                 return 0;
1128
1129         if (!strcmp("strict", str))
1130                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1131         else if (!strcmp("lax", str))
1132                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1133         else if (!strcmp("no", str))
1134                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1135
1136         return 1;
1137 }
1138
1139 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1140
1141 /* Check for resource conflicts between ACPI OperationRegions and native
1142  * drivers */
1143 int acpi_check_resource_conflict(struct resource *res)
1144 {
1145         struct acpi_res_list *res_list_elem;
1146         int ioport;
1147         int clash = 0;
1148
1149         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1150                 return 0;
1151         if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1152                 return 0;
1153
1154         ioport = res->flags & IORESOURCE_IO;
1155
1156         spin_lock(&acpi_res_lock);
1157         list_for_each_entry(res_list_elem, &resource_list_head,
1158                             resource_list) {
1159                 if (ioport && (res_list_elem->resource_type
1160                                != ACPI_ADR_SPACE_SYSTEM_IO))
1161                         continue;
1162                 if (!ioport && (res_list_elem->resource_type
1163                                 != ACPI_ADR_SPACE_SYSTEM_MEMORY))
1164                         continue;
1165
1166                 if (res->end < res_list_elem->start
1167                     || res_list_elem->end < res->start)
1168                         continue;
1169                 clash = 1;
1170                 break;
1171         }
1172         spin_unlock(&acpi_res_lock);
1173
1174         if (clash) {
1175                 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1176                         printk("%sACPI: %s resource %s [0x%llx-0x%llx]"
1177                                " conflicts with ACPI region %s"
1178                                " [0x%llx-0x%llx]\n",
1179                                acpi_enforce_resources == ENFORCE_RESOURCES_LAX
1180                                ? KERN_WARNING : KERN_ERR,
1181                                ioport ? "I/O" : "Memory", res->name,
1182                                (long long) res->start, (long long) res->end,
1183                                res_list_elem->name,
1184                                (long long) res_list_elem->start,
1185                                (long long) res_list_elem->end);
1186                         printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
1187                 }
1188                 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1189                         return -EBUSY;
1190         }
1191         return 0;
1192 }
1193 EXPORT_SYMBOL(acpi_check_resource_conflict);
1194
1195 int acpi_check_region(resource_size_t start, resource_size_t n,
1196                       const char *name)
1197 {
1198         struct resource res = {
1199                 .start = start,
1200                 .end   = start + n - 1,
1201                 .name  = name,
1202                 .flags = IORESOURCE_IO,
1203         };
1204
1205         return acpi_check_resource_conflict(&res);
1206 }
1207 EXPORT_SYMBOL(acpi_check_region);
1208
1209 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1210                       const char *name)
1211 {
1212         struct resource res = {
1213                 .start = start,
1214                 .end   = start + n - 1,
1215                 .name  = name,
1216                 .flags = IORESOURCE_MEM,
1217         };
1218
1219         return acpi_check_resource_conflict(&res);
1220
1221 }
1222 EXPORT_SYMBOL(acpi_check_mem_region);
1223
1224 /*
1225  * Acquire a spinlock.
1226  *
1227  * handle is a pointer to the spinlock_t.
1228  */
1229
1230 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1231 {
1232         acpi_cpu_flags flags;
1233         spin_lock_irqsave(lockp, flags);
1234         return flags;
1235 }
1236
1237 /*
1238  * Release a spinlock. See above.
1239  */
1240
1241 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1242 {
1243         spin_unlock_irqrestore(lockp, flags);
1244 }
1245
1246 #ifndef ACPI_USE_LOCAL_CACHE
1247
1248 /*******************************************************************************
1249  *
1250  * FUNCTION:    acpi_os_create_cache
1251  *
1252  * PARAMETERS:  name      - Ascii name for the cache
1253  *              size      - Size of each cached object
1254  *              depth     - Maximum depth of the cache (in objects) <ignored>
1255  *              cache     - Where the new cache object is returned
1256  *
1257  * RETURN:      status
1258  *
1259  * DESCRIPTION: Create a cache object
1260  *
1261  ******************************************************************************/
1262
1263 acpi_status
1264 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1265 {
1266         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1267         if (*cache == NULL)
1268                 return AE_ERROR;
1269         else
1270                 return AE_OK;
1271 }
1272
1273 /*******************************************************************************
1274  *
1275  * FUNCTION:    acpi_os_purge_cache
1276  *
1277  * PARAMETERS:  Cache           - Handle to cache object
1278  *
1279  * RETURN:      Status
1280  *
1281  * DESCRIPTION: Free all objects within the requested cache.
1282  *
1283  ******************************************************************************/
1284
1285 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1286 {
1287         kmem_cache_shrink(cache);
1288         return (AE_OK);
1289 }
1290
1291 /*******************************************************************************
1292  *
1293  * FUNCTION:    acpi_os_delete_cache
1294  *
1295  * PARAMETERS:  Cache           - Handle to cache object
1296  *
1297  * RETURN:      Status
1298  *
1299  * DESCRIPTION: Free all objects within the requested cache and delete the
1300  *              cache object.
1301  *
1302  ******************************************************************************/
1303
1304 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1305 {
1306         kmem_cache_destroy(cache);
1307         return (AE_OK);
1308 }
1309
1310 /*******************************************************************************
1311  *
1312  * FUNCTION:    acpi_os_release_object
1313  *
1314  * PARAMETERS:  Cache       - Handle to cache object
1315  *              Object      - The object to be released
1316  *
1317  * RETURN:      None
1318  *
1319  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1320  *              the object is deleted.
1321  *
1322  ******************************************************************************/
1323
1324 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1325 {
1326         kmem_cache_free(cache, object);
1327         return (AE_OK);
1328 }
1329
1330 /******************************************************************************
1331  *
1332  * FUNCTION:    acpi_os_validate_interface
1333  *
1334  * PARAMETERS:  interface           - Requested interface to be validated
1335  *
1336  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1337  *
1338  * DESCRIPTION: Match an interface string to the interfaces supported by the
1339  *              host. Strings originate from an AML call to the _OSI method.
1340  *
1341  *****************************************************************************/
1342
1343 acpi_status
1344 acpi_os_validate_interface (char *interface)
1345 {
1346         if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1347                 return AE_OK;
1348         if (!strcmp("Linux", interface)) {
1349
1350                 printk(KERN_NOTICE PREFIX
1351                         "BIOS _OSI(Linux) query %s%s\n",
1352                         osi_linux.enable ? "honored" : "ignored",
1353                         osi_linux.cmdline ? " via cmdline" :
1354                         osi_linux.dmi ? " via DMI" : "");
1355
1356                 if (osi_linux.enable)
1357                         return AE_OK;
1358         }
1359         return AE_SUPPORT;
1360 }
1361
1362 static inline int acpi_res_list_add(struct acpi_res_list *res)
1363 {
1364         struct acpi_res_list *res_list_elem;
1365
1366         list_for_each_entry(res_list_elem, &resource_list_head,
1367                             resource_list) {
1368
1369                 if (res->resource_type == res_list_elem->resource_type &&
1370                     res->start == res_list_elem->start &&
1371                     res->end == res_list_elem->end) {
1372
1373                         /*
1374                          * The Region(addr,len) already exist in the list,
1375                          * just increase the count
1376                          */
1377
1378                         res_list_elem->count++;
1379                         return 0;
1380                 }
1381         }
1382
1383         res->count = 1;
1384         list_add(&res->resource_list, &resource_list_head);
1385         return 1;
1386 }
1387
1388 static inline void acpi_res_list_del(struct acpi_res_list *res)
1389 {
1390         struct acpi_res_list *res_list_elem;
1391
1392         list_for_each_entry(res_list_elem, &resource_list_head,
1393                             resource_list) {
1394
1395                 if (res->resource_type == res_list_elem->resource_type &&
1396                     res->start == res_list_elem->start &&
1397                     res->end == res_list_elem->end) {
1398
1399                         /*
1400                          * If the res count is decreased to 0,
1401                          * remove and free it
1402                          */
1403
1404                         if (--res_list_elem->count == 0) {
1405                                 list_del(&res_list_elem->resource_list);
1406                                 kfree(res_list_elem);
1407                         }
1408                         return;
1409                 }
1410         }
1411 }
1412
1413 acpi_status
1414 acpi_os_invalidate_address(
1415     u8                   space_id,
1416     acpi_physical_address   address,
1417     acpi_size               length)
1418 {
1419         struct acpi_res_list res;
1420
1421         switch (space_id) {
1422         case ACPI_ADR_SPACE_SYSTEM_IO:
1423         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1424                 /* Only interference checks against SystemIO and SytemMemory
1425                    are needed */
1426                 res.start = address;
1427                 res.end = address + length - 1;
1428                 res.resource_type = space_id;
1429                 spin_lock(&acpi_res_lock);
1430                 acpi_res_list_del(&res);
1431                 spin_unlock(&acpi_res_lock);
1432                 break;
1433         case ACPI_ADR_SPACE_PCI_CONFIG:
1434         case ACPI_ADR_SPACE_EC:
1435         case ACPI_ADR_SPACE_SMBUS:
1436         case ACPI_ADR_SPACE_CMOS:
1437         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1438         case ACPI_ADR_SPACE_DATA_TABLE:
1439         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1440                 break;
1441         }
1442         return AE_OK;
1443 }
1444
1445 /******************************************************************************
1446  *
1447  * FUNCTION:    acpi_os_validate_address
1448  *
1449  * PARAMETERS:  space_id             - ACPI space ID
1450  *              address             - Physical address
1451  *              length              - Address length
1452  *
1453  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1454  *              should return AE_AML_ILLEGAL_ADDRESS.
1455  *
1456  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1457  *              the addresses accessed by AML operation regions.
1458  *
1459  *****************************************************************************/
1460
1461 acpi_status
1462 acpi_os_validate_address (
1463     u8                   space_id,
1464     acpi_physical_address   address,
1465     acpi_size               length,
1466     char *name)
1467 {
1468         struct acpi_res_list *res;
1469         int added;
1470         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1471                 return AE_OK;
1472
1473         switch (space_id) {
1474         case ACPI_ADR_SPACE_SYSTEM_IO:
1475         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1476                 /* Only interference checks against SystemIO and SytemMemory
1477                    are needed */
1478                 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1479                 if (!res)
1480                         return AE_OK;
1481                 /* ACPI names are fixed to 4 bytes, still better use strlcpy */
1482                 strlcpy(res->name, name, 5);
1483                 res->start = address;
1484                 res->end = address + length - 1;
1485                 res->resource_type = space_id;
1486                 spin_lock(&acpi_res_lock);
1487                 added = acpi_res_list_add(res);
1488                 spin_unlock(&acpi_res_lock);
1489                 pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
1490                          "name: %s\n", added ? "Added" : "Already exist",
1491                          (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1492                          ? "SystemIO" : "System Memory",
1493                          (unsigned long long)res->start,
1494                          (unsigned long long)res->end,
1495                          res->name);
1496                 if (!added)
1497                         kfree(res);
1498                 break;
1499         case ACPI_ADR_SPACE_PCI_CONFIG:
1500         case ACPI_ADR_SPACE_EC:
1501         case ACPI_ADR_SPACE_SMBUS:
1502         case ACPI_ADR_SPACE_CMOS:
1503         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1504         case ACPI_ADR_SPACE_DATA_TABLE:
1505         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1506                 break;
1507         }
1508         return AE_OK;
1509 }
1510
1511 #endif