1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; if not, write to the Free Software
24  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *
28  */
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/mm.h>
34 #include <linux/pci.h>
35 #include <linux/interrupt.h>
36 #include <linux/kmod.h>
37 #include <linux/delay.h>
38 #include <linux/workqueue.h>
39 #include <linux/nmi.h>
40 #include <linux/acpi.h>
41 #include <linux/efi.h>
42 #include <linux/ioport.h>
43 #include <linux/list.h>
44 #include <linux/jiffies.h>
45 #include <linux/semaphore.h>
46
47 #include <asm/io.h>
48 #include <asm/uaccess.h>
49
50 #include <acpi/acpi.h>
51 #include <acpi/acpi_bus.h>
52 #include <acpi/processor.h>
53
54 #define _COMPONENT              ACPI_OS_SERVICES
55 ACPI_MODULE_NAME("osl");
56 #define PREFIX          "ACPI: "
57 struct acpi_os_dpc {
58         acpi_osd_exec_callback function;
59         void *context;
60         struct work_struct work;
61         int wait;
62 };
63
64 #ifdef CONFIG_ACPI_CUSTOM_DSDT
65 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
66 #endif
67
68 #ifdef ENABLE_DEBUGGER
69 #include <linux/kdb.h>
70
71 /* stuff for debugger support */
72 int acpi_in_debugger;
73 EXPORT_SYMBOL(acpi_in_debugger);
74
75 extern char line_buf[80];
76 #endif                          /*ENABLE_DEBUGGER */
77
78 static unsigned int acpi_irq_irq;
79 static acpi_osd_handler acpi_irq_handler;
80 static void *acpi_irq_context;
81 static struct workqueue_struct *kacpid_wq;
82 static struct workqueue_struct *kacpi_notify_wq;
83 static struct workqueue_struct *kacpi_hotplug_wq;
84
85 struct acpi_res_list {
86         resource_size_t start;
87         resource_size_t end;
88         acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
89         char name[5];   /* can only be 4 chars long; use this copy instead
90                            of res->name so no separate kmalloc is needed */
91         struct list_head resource_list;
92         int count;
93 };
94
95 static LIST_HEAD(resource_list_head);
96 static DEFINE_SPINLOCK(acpi_res_lock);
97
98 /*
99  * This list of permanent mappings is for memory that may be accessed from
100  * interrupt context, where we can't do the ioremap().
101  */
102 struct acpi_ioremap {
103         struct list_head list;
104         void __iomem *virt;
105         acpi_physical_address phys;
106         acpi_size size;
107         struct kref ref;
108 };
109
110 static LIST_HEAD(acpi_ioremaps);
111 static DEFINE_SPINLOCK(acpi_ioremap_lock);
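/*
 * Updates to the list are serialized by acpi_ioremap_lock; lookups may also
 * be done under rcu_read_lock() (see acpi_os_read_memory() below), which is
 * why entries are only freed after synchronize_rcu() in the unmap path.
 */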
112
113 #define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
114 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
115
116 /*
117  * The story of _OSI(Linux)
118  *
119  * From pre-history through Linux-2.6.22,
120  * Linux responded TRUE upon a BIOS OSI(Linux) query.
121  *
122  * Unfortunately, reference BIOS writers got wind of this
123  * and put OSI(Linux) in their example code, quickly exposing
124  * this string as ill-conceived and opening the door to
125  * an unbounded number of BIOS incompatibilities.
126  *
127  * For example, OSI(Linux) was used on resume to re-POST a
128  * video card on one system, because Linux at that time
129  * could not do a speedy restore in its native driver.
130  * But then, upon gaining quick native restore capability,
131  * Linux had no way to tell the BIOS to skip the time-consuming
132  * POST -- putting Linux at a permanent performance disadvantage.
133  * On another system, the BIOS writer used OSI(Linux)
134  * to infer native OS support for IPMI!  On other systems,
135  * OSI(Linux) simply got in the way of Linux claiming to
136  * be compatible with other operating systems, exposing
137  * BIOS issues such as skipped device initialization.
138  *
139  * So "Linux" turned out to be a really poor choice of
140  * OSI string, and from Linux-2.6.23 onward we respond FALSE.
141  *
142  * BIOS writers should NOT query _OSI(Linux) on future systems.
143  * Linux will complain on the console when it sees it, and return FALSE.
144  * Getting Linux to return TRUE for your system requires either
145  * a kernel source update to add a DMI entry,
146  * or booting with "acpi_osi=Linux"
147  */
148
149 static struct osi_linux {
150         unsigned int    enable:1;
151         unsigned int    dmi:1;
152         unsigned int    cmdline:1;
153         unsigned int    known:1;
154 } osi_linux = { 0, 0, 0, 0};
155
156 static void __init acpi_request_region (struct acpi_generic_address *addr,
157         unsigned int length, char *desc)
158 {
159         if (!addr->address || !length)
160                 return;
161
162         /* Resources are never freed */
163         if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
164                 request_region(addr->address, length, desc);
165         else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
166                 request_mem_region(addr->address, length, desc);
167 }
168
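/*
 * Reserve the fixed hardware register blocks declared in the FADT so that
 * they appear in the resource tree and conflicting requests from native
 * drivers can be detected.
 */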
169 static int __init acpi_reserve_resources(void)
170 {
171         acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
172                 "ACPI PM1a_EVT_BLK");
173
174         acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
175                 "ACPI PM1b_EVT_BLK");
176
177         acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
178                 "ACPI PM1a_CNT_BLK");
179
180         acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
181                 "ACPI PM1b_CNT_BLK");
182
183         if (acpi_gbl_FADT.pm_timer_length == 4)
184                 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
185
186         acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
187                 "ACPI PM2_CNT_BLK");
188
189         /* The length of each GPE block must be a multiple of 2 */
190
191         if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
192                 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
193                                acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
194
195         if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
196                 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
197                                acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
198
199         return 0;
200 }
201 device_initcall(acpi_reserve_resources);
202
203 void acpi_os_printf(const char *fmt, ...)
204 {
205         va_list args;
206         va_start(args, fmt);
207         acpi_os_vprintf(fmt, args);
208         va_end(args);
209 }
210
211 void acpi_os_vprintf(const char *fmt, va_list args)
212 {
213         static char buffer[512];
214
215         vsnprintf(buffer, sizeof(buffer), fmt, args);
216
217 #ifdef ENABLE_DEBUGGER
218         if (acpi_in_debugger) {
219                 kdb_printf("%s", buffer);
220         } else {
221                 printk(KERN_CONT "%s", buffer);
222         }
223 #else
224         printk(KERN_CONT "%s", buffer);
225 #endif
226 }
227
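/*
 * Locate the RSDP: on EFI systems it is taken from the EFI configuration
 * table (preferring the ACPI 2.0 entry); otherwise acpi_find_root_pointer()
 * searches the legacy BIOS areas for the RSDP signature.
 */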
228 acpi_physical_address __init acpi_os_get_root_pointer(void)
229 {
230         if (efi_enabled) {
231                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
232                         return efi.acpi20;
233                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
234                         return efi.acpi;
235                 else {
236                         printk(KERN_ERR PREFIX
237                                "System description tables not found\n");
238                         return 0;
239                 }
240         } else {
241                 acpi_physical_address pa = 0;
242
243                 acpi_find_root_pointer(&pa);
244                 return pa;
245         }
246 }
247
248 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
249 static struct acpi_ioremap *
250 acpi_map_lookup(acpi_physical_address phys, acpi_size size)
251 {
252         struct acpi_ioremap *map;
253
254         list_for_each_entry_rcu(map, &acpi_ioremaps, list)
255                 if (map->phys <= phys &&
256                     phys + size <= map->phys + map->size)
257                         return map;
258
259         return NULL;
260 }
261
262 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
263 static void __iomem *
264 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
265 {
266         struct acpi_ioremap *map;
267
268         map = acpi_map_lookup(phys, size);
269         if (map)
270                 return map->virt + (phys - map->phys);
271
272         return NULL;
273 }
274
275 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
276 static struct acpi_ioremap *
277 acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
278 {
279         struct acpi_ioremap *map;
280
281         list_for_each_entry_rcu(map, &acpi_ioremaps, list)
282                 if (map->virt <= virt &&
283                     virt + size <= map->virt + map->size)
284                         return map;
285
286         return NULL;
287 }
288
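/*
 * Map an ACPI table or operation region.  The request is rounded out to
 * whole pages and, when the range is already covered by an existing entry,
 * that mapping is reused and its reference count bumped instead of doing a
 * second ioremap().
 */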
289 void __iomem *__init_refok
290 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
291 {
292         struct acpi_ioremap *map, *tmp_map;
293         unsigned long flags, pg_sz;
294         void __iomem *virt;
295         phys_addr_t pg_off;
296
297         if (phys > ULONG_MAX) {
298                 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
299                 return NULL;
300         }
301
302         if (!acpi_gbl_permanent_mmap)
303                 return __acpi_map_table((unsigned long)phys, size);
304
305         map = kzalloc(sizeof(*map), GFP_KERNEL);
306         if (!map)
307                 return NULL;
308
309         pg_off = round_down(phys, PAGE_SIZE);
310         pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
311         virt = ioremap(pg_off, pg_sz);
312         if (!virt) {
313                 kfree(map);
314                 return NULL;
315         }
316
317         INIT_LIST_HEAD(&map->list);
318         map->virt = virt;
319         map->phys = pg_off;
320         map->size = pg_sz;
321         kref_init(&map->ref);
322
323         spin_lock_irqsave(&acpi_ioremap_lock, flags);
324         /* Check if page has already been mapped. */
325         tmp_map = acpi_map_lookup(phys, size);
326         if (tmp_map) {
327                 kref_get(&tmp_map->ref);
328                 spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
329                 iounmap(map->virt);
330                 kfree(map);
331                 return tmp_map->virt + (phys - tmp_map->phys);
332         }
333         list_add_tail_rcu(&map->list, &acpi_ioremaps);
334         spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
335
336         return map->virt + (phys - map->phys);
337 }
338 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
339
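/*
 * kref release callback: only unlink the entry here.  The caller drops the
 * lock, waits for an RCU grace period and then does the iounmap()/kfree(),
 * so lock-free readers never see a stale mapping.
 */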
340 static void acpi_kref_del_iomap(struct kref *ref)
341 {
342         struct acpi_ioremap *map;
343
344         map = container_of(ref, struct acpi_ioremap, ref);
345         list_del_rcu(&map->list);
346 }
347
348 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
349 {
350         struct acpi_ioremap *map;
351         unsigned long flags;
352         int del;
353
354         if (!acpi_gbl_permanent_mmap) {
355                 __acpi_unmap_table(virt, size);
356                 return;
357         }
358
359         spin_lock_irqsave(&acpi_ioremap_lock, flags);
360         map = acpi_map_lookup_virt(virt, size);
361         if (!map) {
362                 spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
363                 printk(KERN_ERR PREFIX "%s: bad address %p\n", __func__, virt);
364                 dump_stack();
365                 return;
366         }
367
368         del = kref_put(&map->ref, acpi_kref_del_iomap);
369         spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
370
371         if (!del)
372                 return;
373
374         synchronize_rcu();
375         iounmap(map->virt);
376         kfree(map);
377 }
378 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
379
380 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
381 {
382         if (!acpi_gbl_permanent_mmap)
383                 __acpi_unmap_table(virt, size);
384 }
385
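/*
 * Pre-map a FADT generic address (SystemMemory only) so that the register
 * block can later be reached from interrupt context through the cached
 * lookup above; other address spaces need no mapping.
 */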
386 int acpi_os_map_generic_address(struct acpi_generic_address *addr)
387 {
388         void __iomem *virt;
389
390         if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
391                 return 0;
392
393         if (!addr->address || !addr->bit_width)
394                 return -EINVAL;
395
396         virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
397         if (!virt)
398                 return -EIO;
399
400         return 0;
401 }
402 EXPORT_SYMBOL_GPL(acpi_os_map_generic_address);
403
404 void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
405 {
406         void __iomem *virt;
407         unsigned long flags;
408         acpi_size size = addr->bit_width / 8;
409
410         if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
411                 return;
412
413         if (!addr->address || !addr->bit_width)
414                 return;
415
416         spin_lock_irqsave(&acpi_ioremap_lock, flags);
417         virt = acpi_map_vaddr_lookup(addr->address, size);
418         spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
419
420         acpi_os_unmap_memory(virt, size);
421 }
422 EXPORT_SYMBOL_GPL(acpi_os_unmap_generic_address);
423
424 #ifdef ACPI_FUTURE_USAGE
425 acpi_status
426 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
427 {
428         if (!phys || !virt)
429                 return AE_BAD_PARAMETER;
430
431         *phys = virt_to_phys(virt);
432
433         return AE_OK;
434 }
435 #endif
436
437 #define ACPI_MAX_OVERRIDE_LEN 100
438
439 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
440
441 acpi_status
442 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
443                             acpi_string * new_val)
444 {
445         if (!init_val || !new_val)
446                 return AE_BAD_PARAMETER;
447
448         *new_val = NULL;
449         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
450                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
451                        acpi_os_name);
452                 *new_val = acpi_os_name;
453         }
454
455         return AE_OK;
456 }
457
458 acpi_status
459 acpi_os_table_override(struct acpi_table_header * existing_table,
460                        struct acpi_table_header ** new_table)
461 {
462         if (!existing_table || !new_table)
463                 return AE_BAD_PARAMETER;
464
465         *new_table = NULL;
466
467 #ifdef CONFIG_ACPI_CUSTOM_DSDT
468         if (strncmp(existing_table->signature, "DSDT", 4) == 0)
469                 *new_table = (struct acpi_table_header *)AmlCode;
470 #endif
471         if (*new_table != NULL) {
472                 printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
473                            "this is unsafe: tainting kernel\n",
474                        existing_table->signature,
475                        existing_table->oem_table_id);
476                 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
477         }
478         return AE_OK;
479 }
480
481 static irqreturn_t acpi_irq(int irq, void *dev_id)
482 {
483         u32 handled;
484
485         handled = (*acpi_irq_handler) (acpi_irq_context);
486
487         if (handled) {
488                 acpi_irq_handled++;
489                 return IRQ_HANDLED;
490         } else {
491                 acpi_irq_not_handled++;
492                 return IRQ_NONE;
493         }
494 }
495
496 acpi_status
497 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
498                                   void *context)
499 {
500         unsigned int irq;
501
502         acpi_irq_stats_init();
503
504         /*
505          * Ignore the GSI from the core, and use the value in our copy of the
506          * FADT. It may not be the same if an interrupt source override exists
507          * for the SCI.
508          */
509         gsi = acpi_gbl_FADT.sci_interrupt;
510         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
511                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
512                        gsi);
513                 return AE_OK;
514         }
515
516         acpi_irq_handler = handler;
517         acpi_irq_context = context;
518         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
519                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
520                 return AE_NOT_ACQUIRED;
521         }
522         acpi_irq_irq = irq;
523
524         return AE_OK;
525 }
526
527 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
528 {
529         if (irq) {
530                 free_irq(irq, acpi_irq);
531                 acpi_irq_handler = NULL;
532                 acpi_irq_irq = 0;
533         }
534
535         return AE_OK;
536 }
537
538 /*
539  * Running in interpreter thread context, safe to sleep
540  */
541
542 void acpi_os_sleep(u64 ms)
543 {
544         schedule_timeout_interruptible(msecs_to_jiffies(ms));
545 }
546
547 void acpi_os_stall(u32 us)
548 {
549         while (us) {
550                 u32 delay = 1000;
551
552                 if (delay > us)
553                         delay = us;
554                 udelay(delay);
555                 touch_nmi_watchdog();
556                 us -= delay;
557         }
558 }
559
560 /*
561  * Support ACPI 3.0 AML Timer operand
562  * Returns 64-bit free-running, monotonically increasing timer
563  * with 100ns granularity
564  */
565 u64 acpi_os_get_timer(void)
566 {
567         static u64 t;
568
569 #ifdef  CONFIG_HPET
570         /* TBD: use HPET if available */
571 #endif
572
573 #ifdef  CONFIG_X86_PM_TIMER
574         /* TBD: default to PM timer if HPET was not available */
575 #endif
576         if (!t)
577                 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
578
579         return ++t;
580 }
581
582 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
583 {
584         u32 dummy;
585
586         if (!value)
587                 value = &dummy;
588
589         *value = 0;
590         if (width <= 8) {
591                 *(u8 *) value = inb(port);
592         } else if (width <= 16) {
593                 *(u16 *) value = inw(port);
594         } else if (width <= 32) {
595                 *(u32 *) value = inl(port);
596         } else {
597                 BUG();
598         }
599
600         return AE_OK;
601 }
602
603 EXPORT_SYMBOL(acpi_os_read_port);
604
605 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
606 {
607         if (width <= 8) {
608                 outb(value, port);
609         } else if (width <= 16) {
610                 outw(value, port);
611         } else if (width <= 32) {
612                 outl(value, port);
613         } else {
614                 BUG();
615         }
616
617         return AE_OK;
618 }
619
620 EXPORT_SYMBOL(acpi_os_write_port);
621
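/*
 * Prefer a pre-existing permanent mapping, found under rcu_read_lock() and
 * therefore usable from interrupt context; otherwise fall back to a
 * transient ioremap()/iounmap(), which is only valid in process context.
 */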
622 acpi_status
623 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
624 {
625         u32 dummy;
626         void __iomem *virt_addr;
627         int size = width / 8, unmap = 0;
628
629         rcu_read_lock();
630         virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
631         rcu_read_unlock();
632         if (!virt_addr) {
633                 virt_addr = ioremap(phys_addr, size);
634                 unmap = 1;
635         }
636         if (!value)
637                 value = &dummy;
638
639         switch (width) {
640         case 8:
641                 *(u8 *) value = readb(virt_addr);
642                 break;
643         case 16:
644                 *(u16 *) value = readw(virt_addr);
645                 break;
646         case 32:
647                 *(u32 *) value = readl(virt_addr);
648                 break;
649         default:
650                 BUG();
651         }
652
653         if (unmap)
654                 iounmap(virt_addr);
655
656         return AE_OK;
657 }
658
659 acpi_status
660 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
661 {
662         void __iomem *virt_addr;
663         int size = width / 8, unmap = 0;
664
665         rcu_read_lock();
666         virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
667         rcu_read_unlock();
668         if (!virt_addr) {
669                 virt_addr = ioremap(phys_addr, size);
670                 unmap = 1;
671         }
672
673         switch (width) {
674         case 8:
675                 writeb(value, virt_addr);
676                 break;
677         case 16:
678                 writew(value, virt_addr);
679                 break;
680         case 32:
681                 writel(value, virt_addr);
682                 break;
683         default:
684                 BUG();
685         }
686
687         if (unmap)
688                 iounmap(virt_addr);
689
690         return AE_OK;
691 }
692
693 acpi_status
694 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
695                                u32 *value, u32 width)
696 {
697         int result, size;
698
699         if (!value)
700                 return AE_BAD_PARAMETER;
701
702         switch (width) {
703         case 8:
704                 size = 1;
705                 break;
706         case 16:
707                 size = 2;
708                 break;
709         case 32:
710                 size = 4;
711                 break;
712         default:
713                 return AE_ERROR;
714         }
715
716         result = raw_pci_read(pci_id->segment, pci_id->bus,
717                                 PCI_DEVFN(pci_id->device, pci_id->function),
718                                 reg, size, value);
719
720         return (result ? AE_ERROR : AE_OK);
721 }
722
723 acpi_status
724 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
725                                 u64 value, u32 width)
726 {
727         int result, size;
728
729         switch (width) {
730         case 8:
731                 size = 1;
732                 break;
733         case 16:
734                 size = 2;
735                 break;
736         case 32:
737                 size = 4;
738                 break;
739         default:
740                 return AE_ERROR;
741         }
742
743         result = raw_pci_write(pci_id->segment, pci_id->bus,
744                                 PCI_DEVFN(pci_id->device, pci_id->function),
745                                 reg, size, value);
746
747         return (result ? AE_ERROR : AE_OK);
748 }
749
750 /* TODO: Change code to take advantage of driver model more */
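/*
 * Recurse from the current node up to the PCI root (rhandle), evaluating
 * _ADR at each level for the device/function numbers.  If a node is a
 * PCI-to-PCI or CardBus bridge (config header type 1 or 2 at offset 0x0e),
 * its primary bus number (0x18) is used for the bridge itself and its
 * secondary bus number (0x19) is remembered for the devices behind it.
 */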
751 static void acpi_os_derive_pci_id_2(acpi_handle rhandle,        /* upper bound  */
752                                     acpi_handle chandle,        /* current node */
753                                     struct acpi_pci_id **id,
754                                     int *is_bridge, u8 * bus_number)
755 {
756         acpi_handle handle;
757         struct acpi_pci_id *pci_id = *id;
758         acpi_status status;
759         unsigned long long temp;
760         acpi_object_type type;
761
762         acpi_get_parent(chandle, &handle);
763         if (handle != rhandle) {
764                 acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
765                                         bus_number);
766
767                 status = acpi_get_type(handle, &type);
768                 if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
769                         return;
770
771                 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
772                                           &temp);
773                 if (ACPI_SUCCESS(status)) {
774                         u32 val;
775                         pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
776                         pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
777
778                         if (*is_bridge)
779                                 pci_id->bus = *bus_number;
780
781                         /* any nicer way to get bus number of bridge ? */
782                         status =
783                             acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
784                                                            8);
785                         if (ACPI_SUCCESS(status)
786                             && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
787                                 status =
788                                     acpi_os_read_pci_configuration(pci_id, 0x18,
789                                                                    &val, 8);
790                                 if (!ACPI_SUCCESS(status)) {
791                                         /* Certainly broken...  FIX ME */
792                                         return;
793                                 }
794                                 *is_bridge = 1;
795                                 pci_id->bus = val;
796                                 status =
797                                     acpi_os_read_pci_configuration(pci_id, 0x19,
798                                                                    &val, 8);
799                                 if (ACPI_SUCCESS(status)) {
800                                         *bus_number = val;
801                                 }
802                         } else
803                                 *is_bridge = 0;
804                 }
805         }
806 }
807
808 void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound  */
809                            acpi_handle chandle, /* current node */
810                            struct acpi_pci_id **id)
811 {
812         int is_bridge = 1;
813         u8 bus_number = (*id)->bus;
814
815         acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
816 }
817
818 static void acpi_os_execute_deferred(struct work_struct *work)
819 {
820         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
821
822         if (dpc->wait)
823                 acpi_os_wait_events_complete(NULL);
824
825         dpc->function(dpc->context);
826         kfree(dpc);
827 }
828
829 /*******************************************************************************
830  *
831  * FUNCTION:    acpi_os_execute
832  *
833  * PARAMETERS:  Type               - Type of the callback
834  *              Function           - Function to be executed
835  *              Context            - Function parameters
836  *
837  * RETURN:      Status
838  *
839  * DESCRIPTION: Depending on type, either queues function for deferred execution or
840  *              immediately executes function on a separate thread.
841  *
842  ******************************************************************************/
843
844 static acpi_status __acpi_os_execute(acpi_execute_type type,
845         acpi_osd_exec_callback function, void *context, int hp)
846 {
847         acpi_status status = AE_OK;
848         struct acpi_os_dpc *dpc;
849         struct workqueue_struct *queue;
850         int ret;
851         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
852                           "Scheduling function [%p(%p)] for deferred execution.\n",
853                           function, context));
854
855         /*
856          * Allocate/initialize DPC structure.  Note that this memory will be
857          * freed by the callee.  The kernel handles the work_struct list in a
858          * way that allows us to also free its memory inside the callee.
859          * Because we may want to schedule several tasks with different
860          * parameters we can't use the approach some kernel code uses of
861          * having a static work_struct.
862          */
863
864         dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
865         if (!dpc)
866                 return AE_NO_MEMORY;
867
868         dpc->function = function;
869         dpc->context = context;
870
871         /*
872          * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
873          * because the hotplug code may call driver .remove() functions,
874          * which invoke flush_scheduled_work/acpi_os_wait_events_complete
875          * to flush these workqueues.
876          */
877         queue = hp ? kacpi_hotplug_wq :
878                 (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
879         dpc->wait = hp ? 1 : 0;
880
881         /*
882          * All three workqueues use the same deferred-execution handler;
883          * dpc->wait (set above for hotplug) selects the extra flush done
884          * in acpi_os_execute_deferred().
885          */
886         INIT_WORK(&dpc->work, acpi_os_execute_deferred);
887
888         /*
889          * On some machines, a software-initiated SMI causes corruption unless
890          * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
891          * typically it's done in GPE-related methods that are run via
892          * workqueues, so we can avoid the known corruption cases by always
893          * queueing on CPU 0.
894          */
895         ret = queue_work_on(0, queue, &dpc->work);
896
897         if (!ret) {
898                 printk(KERN_ERR PREFIX
899                           "Call to queue_work() failed.\n");
900                 status = AE_ERROR;
901                 kfree(dpc);
902         }
903         return status;
904 }
905
906 acpi_status acpi_os_execute(acpi_execute_type type,
907                             acpi_osd_exec_callback function, void *context)
908 {
909         return __acpi_os_execute(type, function, context, 0);
910 }
911 EXPORT_SYMBOL(acpi_os_execute);
912
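/*
 * Hotplug requests go to their own workqueue and set dpc->wait, which makes
 * the deferred handler flush the regular ACPI workqueues before running, so
 * driver .remove() callbacks invoked from hotplug context can safely flush
 * kacpid_wq/kacpi_notify_wq without deadlocking.
 */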
913 acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
914         void *context)
915 {
916         return __acpi_os_execute(0, function, context, 1);
917 }
918
919 void acpi_os_wait_events_complete(void *context)
920 {
921         flush_workqueue(kacpid_wq);
922         flush_workqueue(kacpi_notify_wq);
923 }
924
925 EXPORT_SYMBOL(acpi_os_wait_events_complete);
926
927 /*
928  * Allocate the memory for a spinlock and initialize it.
929  */
930 acpi_status acpi_os_create_lock(acpi_spinlock * handle)
931 {
932         spin_lock_init(*handle);
933
934         return AE_OK;
935 }
936
937 /*
938  * Deallocate the memory for a spinlock.
939  */
940 void acpi_os_delete_lock(acpi_spinlock handle)
941 {
942         return;
943 }
944
945 acpi_status
946 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
947 {
948         struct semaphore *sem = NULL;
949
950         sem = acpi_os_allocate(sizeof(struct semaphore));
951         if (!sem)
952                 return AE_NO_MEMORY;
953         memset(sem, 0, sizeof(struct semaphore));
954
955         sema_init(sem, initial_units);
956
957         *handle = (acpi_handle *) sem;
958
959         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
960                           *handle, initial_units));
961
962         return AE_OK;
963 }
964
965 /*
966  * TODO: A better way to delete semaphores?  Linux doesn't have a
967  * 'delete_semaphore()' function -- simply freeing it may result in an
968  * invalid pointer dereference for non-synchronized consumers.  Should
969  * we at least check for blocked threads and signal/cancel them?
970  */
971
972 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
973 {
974         struct semaphore *sem = (struct semaphore *)handle;
975
976         if (!sem)
977                 return AE_BAD_PARAMETER;
978
979         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
980
981         BUG_ON(!list_empty(&sem->wait_list));
982         kfree(sem);
983         sem = NULL;
984
985         return AE_OK;
986 }
987
988 /*
989  * TODO: Support for units > 1?
990  */
991 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
992 {
993         acpi_status status = AE_OK;
994         struct semaphore *sem = (struct semaphore *)handle;
995         long jiffies;
996         int ret = 0;
997
998         if (!sem || (units < 1))
999                 return AE_BAD_PARAMETER;
1000
1001         if (units > 1)
1002                 return AE_SUPPORT;
1003
1004         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
1005                           handle, units, timeout));
1006
1007         if (timeout == ACPI_WAIT_FOREVER)
1008                 jiffies = MAX_SCHEDULE_TIMEOUT;
1009         else
1010                 jiffies = msecs_to_jiffies(timeout);
1011         
1012         ret = down_timeout(sem, jiffies);
1013         if (ret)
1014                 status = AE_TIME;
1015
1016         if (ACPI_FAILURE(status)) {
1017                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1018                                   "Failed to acquire semaphore[%p|%d|%d], %s",
1019                                   handle, units, timeout,
1020                                   acpi_format_exception(status)));
1021         } else {
1022                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1023                                   "Acquired semaphore[%p|%d|%d]", handle,
1024                                   units, timeout));
1025         }
1026
1027         return status;
1028 }
1029
1030 /*
1031  * TODO: Support for units > 1?
1032  */
1033 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
1034 {
1035         struct semaphore *sem = (struct semaphore *)handle;
1036
1037         if (!sem || (units < 1))
1038                 return AE_BAD_PARAMETER;
1039
1040         if (units > 1)
1041                 return AE_SUPPORT;
1042
1043         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
1044                           units));
1045
1046         up(sem);
1047
1048         return AE_OK;
1049 }
1050
1051 #ifdef ACPI_FUTURE_USAGE
1052 u32 acpi_os_get_line(char *buffer)
1053 {
1054
1055 #ifdef ENABLE_DEBUGGER
1056         if (acpi_in_debugger) {
1057                 u32 chars;
1058
1059                 kdb_read(buffer, sizeof(line_buf));
1060
1061                 /* remove the CR kdb includes */
1062                 chars = strlen(buffer) - 1;
1063                 buffer[chars] = '\0';
1064         }
1065 #endif
1066
1067         return 0;
1068 }
1069 #endif                          /*  ACPI_FUTURE_USAGE  */
1070
1071 acpi_status acpi_os_signal(u32 function, void *info)
1072 {
1073         switch (function) {
1074         case ACPI_SIGNAL_FATAL:
1075                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
1076                 break;
1077         case ACPI_SIGNAL_BREAKPOINT:
1078                 /*
1079                  * AML Breakpoint
1080                  * ACPI spec. says to treat it as a NOP unless
1081                  * you are debugging.  So if/when we integrate
1082                  * the AML debugger into the kernel debugger, its
1083                  * hook will go here.  But until then it is
1084                  * not useful to print anything on breakpoints.
1085                  */
1086                 break;
1087         default:
1088                 break;
1089         }
1090
1091         return AE_OK;
1092 }
1093
1094 static int __init acpi_os_name_setup(char *str)
1095 {
1096         char *p = acpi_os_name;
1097         int count = ACPI_MAX_OVERRIDE_LEN - 1;
1098
1099         if (!str || !*str)
1100                 return 0;
1101
1102         for (; count-- && str && *str; str++) {
1103                 if (isalnum(*str) || *str == ' ' || *str == ':')
1104                         *p++ = *str;
1105                 else if (*str == '\'' || *str == '"')
1106                         continue;
1107                 else
1108                         break;
1109         }
1110         *p = 0;
1111
1112         return 1;
1113
1114 }
1115
1116 __setup("acpi_os_name=", acpi_os_name_setup);
1117
1118 static void __init set_osi_linux(unsigned int enable)
1119 {
1120         if (osi_linux.enable != enable) {
1121                 osi_linux.enable = enable;
1122                 printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
1123                         enable ? "Add": "Delet");
1124         }
1125         return;
1126 }
1127
1128 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1129 {
1130         osi_linux.cmdline = 1;  /* cmdline set the default */
1131         set_osi_linux(enable);
1132
1133         return;
1134 }
1135
1136 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1137 {
1138         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1139
1140         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1141
1142         if (enable == -1)
1143                 return;
1144
1145         osi_linux.known = 1;    /* DMI knows which OSI(Linux) default needed */
1146
1147         set_osi_linux(enable);
1148
1149         return;
1150 }
1151
1152 /*
1153  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1154  *
1155  * empty string disables _OSI
1156  * string starting with '!' disables that string
1157  * otherwise string is added to list, augmenting built-in strings
1158  */
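/*
 * Illustrative kernel command-line examples (the quoted vendor string below
 * is just an example; any interface string works the same way):
 *
 *   acpi_osi=                  disable the _OSI method altogether
 *   acpi_osi=!Linux            answer FALSE to _OSI(Linux)
 *   acpi_osi=Linux             answer TRUE to _OSI(Linux)
 *   acpi_osi="Windows 2006"    also answer TRUE for this additional string
 */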
1159 int __init acpi_osi_setup(char *str)
1160 {
1161         if (str == NULL || *str == '\0') {
1162                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1163                 acpi_gbl_create_osi_method = FALSE;
1164         } else if (!strcmp("!Linux", str)) {
1165                 acpi_cmdline_osi_linux(0);      /* !enable */
1166         } else if (*str == '!') {
1167                 if (acpi_osi_invalidate(++str) == AE_OK)
1168                         printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1169         } else if (!strcmp("Linux", str)) {
1170                 acpi_cmdline_osi_linux(1);      /* enable */
1171         } else if (*osi_additional_string == '\0') {
1172                 strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1173                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1174         }
1175
1176         return 1;
1177 }
1178
1179 __setup("acpi_osi=", acpi_osi_setup);
1180
1181 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1182 static int __init acpi_serialize_setup(char *str)
1183 {
1184         printk(KERN_INFO PREFIX "serialize enabled\n");
1185
1186         acpi_gbl_all_methods_serialized = TRUE;
1187
1188         return 1;
1189 }
1190
1191 __setup("acpi_serialize", acpi_serialize_setup);
1192
1193 /* Check for resource interference between native drivers and ACPI
1194  * OperationRegions (SystemIO and SystemMemory only).
1195  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1196  * in arbitrary AML code and can interfere with legacy drivers.
1197  * acpi_enforce_resources= can be set to:
1198  *
1199  *   - strict (default) (2)
1200  *     -> drivers trying to access the resources will not load
1201  *   - lax              (1)
1202  *     -> drivers trying to access the resources will load, but you
1203  *        get a system message that something might go wrong...
1204  *
1205  *   - no               (0)
1206  *     -> ACPI Operation Region resources will not be registered
1207  *
1208  */
1209 #define ENFORCE_RESOURCES_STRICT 2
1210 #define ENFORCE_RESOURCES_LAX    1
1211 #define ENFORCE_RESOURCES_NO     0
1212
1213 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1214
1215 static int __init acpi_enforce_resources_setup(char *str)
1216 {
1217         if (str == NULL || *str == '\0')
1218                 return 0;
1219
1220         if (!strcmp("strict", str))
1221                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1222         else if (!strcmp("lax", str))
1223                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1224         else if (!strcmp("no", str))
1225                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1226
1227         return 1;
1228 }
1229
1230 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1231
1232 /* Check for resource conflicts between ACPI OperationRegions and native
1233  * drivers */
1234 int acpi_check_resource_conflict(const struct resource *res)
1235 {
1236         struct acpi_res_list *res_list_elem;
1237         int ioport;
1238         int clash = 0;
1239
1240         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1241                 return 0;
1242         if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1243                 return 0;
1244
1245         ioport = res->flags & IORESOURCE_IO;
1246
1247         spin_lock(&acpi_res_lock);
1248         list_for_each_entry(res_list_elem, &resource_list_head,
1249                             resource_list) {
1250                 if (ioport && (res_list_elem->resource_type
1251                                != ACPI_ADR_SPACE_SYSTEM_IO))
1252                         continue;
1253                 if (!ioport && (res_list_elem->resource_type
1254                                 != ACPI_ADR_SPACE_SYSTEM_MEMORY))
1255                         continue;
1256
1257                 if (res->end < res_list_elem->start
1258                     || res_list_elem->end < res->start)
1259                         continue;
1260                 clash = 1;
1261                 break;
1262         }
1263         spin_unlock(&acpi_res_lock);
1264
1265         if (clash) {
1266                 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1267                         printk(KERN_WARNING "ACPI: resource %s %pR"
1268                                " conflicts with ACPI region %s %pR\n",
1269                                res->name, res, res_list_elem->name,
1270                                res_list_elem);
1271                         if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1272                                 printk(KERN_NOTICE "ACPI: This conflict may"
1273                                        " cause random problems and system"
1274                                        " instability\n");
1275                         printk(KERN_INFO "ACPI: If an ACPI driver is available"
1276                                " for this device, you should use it instead of"
1277                                " the native driver\n");
1278                 }
1279                 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1280                         return -EBUSY;
1281         }
1282         return 0;
1283 }
1284 EXPORT_SYMBOL(acpi_check_resource_conflict);
1285
1286 int acpi_check_region(resource_size_t start, resource_size_t n,
1287                       const char *name)
1288 {
1289         struct resource res = {
1290                 .start = start,
1291                 .end   = start + n - 1,
1292                 .name  = name,
1293                 .flags = IORESOURCE_IO,
1294         };
1295
1296         return acpi_check_resource_conflict(&res);
1297 }
1298 EXPORT_SYMBOL(acpi_check_region);
1299
1300 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1301                       const char *name)
1302 {
1303         struct resource res = {
1304                 .start = start,
1305                 .end   = start + n - 1,
1306                 .name  = name,
1307                 .flags = IORESOURCE_MEM,
1308         };
1309
1310         return acpi_check_resource_conflict(&res);
1311
1312 }
1313 EXPORT_SYMBOL(acpi_check_mem_region);
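/*
 * Sketch of how a native driver might use the checks above before claiming
 * a legacy I/O range; the port range and name are made up for illustration:
 *
 *	if (acpi_check_region(0x0290, 8, "mydrv"))
 *		return -EBUSY;
 *	if (!request_region(0x0290, 8, "mydrv"))
 *		return -EBUSY;
 */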
1314
1315 /*
1316  * Let drivers know whether the resource checks are effective
1317  */
1318 int acpi_resources_are_enforced(void)
1319 {
1320         return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
1321 }
1322 EXPORT_SYMBOL(acpi_resources_are_enforced);
1323
1324 /*
1325  * Acquire a spinlock.
1326  *
1327  * handle is a pointer to the spinlock_t.
1328  */
1329
1330 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1331 {
1332         acpi_cpu_flags flags;
1333         spin_lock_irqsave(lockp, flags);
1334         return flags;
1335 }
1336
1337 /*
1338  * Release a spinlock. See above.
1339  */
1340
1341 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1342 {
1343         spin_unlock_irqrestore(lockp, flags);
1344 }
1345
1346 #ifndef ACPI_USE_LOCAL_CACHE
1347
1348 /*******************************************************************************
1349  *
1350  * FUNCTION:    acpi_os_create_cache
1351  *
1352  * PARAMETERS:  name      - Ascii name for the cache
1353  *              size      - Size of each cached object
1354  *              depth     - Maximum depth of the cache (in objects) <ignored>
1355  *              cache     - Where the new cache object is returned
1356  *
1357  * RETURN:      status
1358  *
1359  * DESCRIPTION: Create a cache object
1360  *
1361  ******************************************************************************/
1362
1363 acpi_status
1364 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1365 {
1366         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1367         if (*cache == NULL)
1368                 return AE_ERROR;
1369         else
1370                 return AE_OK;
1371 }
1372
1373 /*******************************************************************************
1374  *
1375  * FUNCTION:    acpi_os_purge_cache
1376  *
1377  * PARAMETERS:  Cache           - Handle to cache object
1378  *
1379  * RETURN:      Status
1380  *
1381  * DESCRIPTION: Free all objects within the requested cache.
1382  *
1383  ******************************************************************************/
1384
1385 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1386 {
1387         kmem_cache_shrink(cache);
1388         return (AE_OK);
1389 }
1390
1391 /*******************************************************************************
1392  *
1393  * FUNCTION:    acpi_os_delete_cache
1394  *
1395  * PARAMETERS:  Cache           - Handle to cache object
1396  *
1397  * RETURN:      Status
1398  *
1399  * DESCRIPTION: Free all objects within the requested cache and delete the
1400  *              cache object.
1401  *
1402  ******************************************************************************/
1403
1404 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1405 {
1406         kmem_cache_destroy(cache);
1407         return (AE_OK);
1408 }
1409
1410 /*******************************************************************************
1411  *
1412  * FUNCTION:    acpi_os_release_object
1413  *
1414  * PARAMETERS:  Cache       - Handle to cache object
1415  *              Object      - The object to be released
1416  *
1417  * RETURN:      None
1418  *
1419  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1420  *              the object is deleted.
1421  *
1422  ******************************************************************************/
1423
1424 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1425 {
1426         kmem_cache_free(cache, object);
1427         return (AE_OK);
1428 }
1429
1430 /******************************************************************************
1431  *
1432  * FUNCTION:    acpi_os_validate_interface
1433  *
1434  * PARAMETERS:  interface           - Requested interface to be validated
1435  *
1436  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1437  *
1438  * DESCRIPTION: Match an interface string to the interfaces supported by the
1439  *              host. Strings originate from an AML call to the _OSI method.
1440  *
1441  *****************************************************************************/
1442
1443 acpi_status
1444 acpi_os_validate_interface (char *interface)
1445 {
1446         if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1447                 return AE_OK;
1448         if (!strcmp("Linux", interface)) {
1449
1450                 printk(KERN_NOTICE PREFIX
1451                         "BIOS _OSI(Linux) query %s%s\n",
1452                         osi_linux.enable ? "honored" : "ignored",
1453                         osi_linux.cmdline ? " via cmdline" :
1454                         osi_linux.dmi ? " via DMI" : "");
1455
1456                 if (osi_linux.enable)
1457                         return AE_OK;
1458         }
1459         return AE_SUPPORT;
1460 }
1461
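/*
 * The helpers below maintain the refcounted list of OperationRegion address
 * ranges that acpi_check_resource_conflict() checks against; callers must
 * hold acpi_res_lock.
 */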
1462 static inline int acpi_res_list_add(struct acpi_res_list *res)
1463 {
1464         struct acpi_res_list *res_list_elem;
1465
1466         list_for_each_entry(res_list_elem, &resource_list_head,
1467                             resource_list) {
1468
1469                 if (res->resource_type == res_list_elem->resource_type &&
1470                     res->start == res_list_elem->start &&
1471                     res->end == res_list_elem->end) {
1472
1473                         /*
1474                          * The Region(addr,len) already exists in the list,
1475                          * just increase the count
1476                          */
1477
1478                         res_list_elem->count++;
1479                         return 0;
1480                 }
1481         }
1482
1483         res->count = 1;
1484         list_add(&res->resource_list, &resource_list_head);
1485         return 1;
1486 }
1487
1488 static inline void acpi_res_list_del(struct acpi_res_list *res)
1489 {
1490         struct acpi_res_list *res_list_elem;
1491
1492         list_for_each_entry(res_list_elem, &resource_list_head,
1493                             resource_list) {
1494
1495                 if (res->resource_type == res_list_elem->resource_type &&
1496                     res->start == res_list_elem->start &&
1497                     res->end == res_list_elem->end) {
1498
1499                         /*
1500                          * If the res count is decreased to 0,
1501                          * remove and free it
1502                          */
1503
1504                         if (--res_list_elem->count == 0) {
1505                                 list_del(&res_list_elem->resource_list);
1506                                 kfree(res_list_elem);
1507                         }
1508                         return;
1509                 }
1510         }
1511 }
1512
1513 acpi_status
1514 acpi_os_invalidate_address(
1515     u8                   space_id,
1516     acpi_physical_address   address,
1517     acpi_size               length)
1518 {
1519         struct acpi_res_list res;
1520
1521         switch (space_id) {
1522         case ACPI_ADR_SPACE_SYSTEM_IO:
1523         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1524                 /* Only interference checks against SystemIO and SystemMemory
1525                    are needed */
1526                 res.start = address;
1527                 res.end = address + length - 1;
1528                 res.resource_type = space_id;
1529                 spin_lock(&acpi_res_lock);
1530                 acpi_res_list_del(&res);
1531                 spin_unlock(&acpi_res_lock);
1532                 break;
1533         case ACPI_ADR_SPACE_PCI_CONFIG:
1534         case ACPI_ADR_SPACE_EC:
1535         case ACPI_ADR_SPACE_SMBUS:
1536         case ACPI_ADR_SPACE_CMOS:
1537         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1538         case ACPI_ADR_SPACE_DATA_TABLE:
1539         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1540                 break;
1541         }
1542         return AE_OK;
1543 }
1544
1545 /******************************************************************************
1546  *
1547  * FUNCTION:    acpi_os_validate_address
1548  *
1549  * PARAMETERS:  space_id             - ACPI space ID
1550  *              address             - Physical address
1551  *              length              - Address length
1552  *
1553  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1554  *              should return AE_AML_ILLEGAL_ADDRESS.
1555  *
1556  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1557  *              the addresses accessed by AML operation regions.
1558  *
1559  *****************************************************************************/
1560
1561 acpi_status
1562 acpi_os_validate_address (
1563     u8                   space_id,
1564     acpi_physical_address   address,
1565     acpi_size               length,
1566     char *name)
1567 {
1568         struct acpi_res_list *res;
1569         int added;
1570         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1571                 return AE_OK;
1572
1573         switch (space_id) {
1574         case ACPI_ADR_SPACE_SYSTEM_IO:
1575         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1576                 /* Only interference checks against SystemIO and SystemMemory
1577                    are needed */
1578                 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1579                 if (!res)
1580                         return AE_OK;
1581                 /* ACPI names are fixed at 4 bytes, but use strlcpy to be safe */
1582                 strlcpy(res->name, name, 5);
1583                 res->start = address;
1584                 res->end = address + length - 1;
1585                 res->resource_type = space_id;
1586                 spin_lock(&acpi_res_lock);
1587                 added = acpi_res_list_add(res);
1588                 spin_unlock(&acpi_res_lock);
1589                 pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
1590                          "name: %s\n", added ? "Added" : "Already exists",
1591                          (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1592                          ? "SystemIO" : "System Memory",
1593                          (unsigned long long)res->start,
1594                          (unsigned long long)res->end,
1595                          res->name);
1596                 if (!added)
1597                         kfree(res);
1598                 break;
1599         case ACPI_ADR_SPACE_PCI_CONFIG:
1600         case ACPI_ADR_SPACE_EC:
1601         case ACPI_ADR_SPACE_SMBUS:
1602         case ACPI_ADR_SPACE_CMOS:
1603         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1604         case ACPI_ADR_SPACE_DATA_TABLE:
1605         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1606                 break;
1607         }
1608         return AE_OK;
1609 }
1610 #endif
1611
1612 acpi_status __init acpi_os_initialize(void)
1613 {
1614         acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1615         acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1616         acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
1617         acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
1618
1619         return AE_OK;
1620 }
1621
1622 acpi_status acpi_os_initialize1(void)
1623 {
1624         kacpid_wq = create_workqueue("kacpid");
1625         kacpi_notify_wq = create_workqueue("kacpi_notify");
1626         kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
1627         BUG_ON(!kacpid_wq);
1628         BUG_ON(!kacpi_notify_wq);
1629         BUG_ON(!kacpi_hotplug_wq);
1630         return AE_OK;
1631 }
1632
1633 acpi_status acpi_os_terminate(void)
1634 {
1635         if (acpi_irq_handler) {
1636                 acpi_os_remove_interrupt_handler(acpi_irq_irq,
1637                                                  acpi_irq_handler);
1638         }
1639
1640         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
1641         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
1642         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1643         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1644
1645         destroy_workqueue(kacpid_wq);
1646         destroy_workqueue(kacpi_notify_wq);
1647         destroy_workqueue(kacpi_hotplug_wq);
1648
1649         return AE_OK;
1650 }