/******************************************************************************
 *
 * Module Name: evgpe - General Purpose Event handling and dispatch
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2010, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")

/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpe_enable_mask
 *
 * PARAMETERS:  gpe_event_info          - GPE to update
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Updates GPE register enable mask based upon whether there are
 *              runtime references to this GPE
 *
 ******************************************************************************/

acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
        struct acpi_gpe_register_info *gpe_register_info;
        u32 register_bit;

        ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);

        gpe_register_info = gpe_event_info->register_info;
        if (!gpe_register_info) {
                return_ACPI_STATUS(AE_NOT_EXIST);
        }

        register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
                                                    gpe_register_info);

        /* Clear the run bit up front */

        ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

        /* Set the mask bit only if there are references to this GPE */

        if (gpe_event_info->runtime_count) {
                ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit);
        }

        return_ACPI_STATUS(AE_OK);
}
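
/*
 * Illustrative sketch (hypothetical register values, not part of the driver):
 * how the run-time enable mask above evolves for a GPE whose register_bit
 * resolves to bit 2 of its 8-bit GPE register.
 *
 *      u8 enable_for_run = 0x81;           // bits 0 and 7 already enabled
 *      u32 register_bit = 0x04;            // this GPE maps to bit 2
 *
 *      enable_for_run &= ~register_bit;    // "clear the run bit up front" -> 0x81
 *      if (runtime_count)                  // at least one run-time reference
 *              enable_for_run |= register_bit;     // -> 0x85
 */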

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_enable_gpe
 *
 * PARAMETERS:  gpe_event_info  - GPE to enable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear the given GPE from stale events and enable it.
 *
 ******************************************************************************/

acpi_status
acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
        acpi_status status;

        ACPI_FUNCTION_TRACE(ev_enable_gpe);

        /*
         * We will only allow a GPE to be enabled if it has either an
         * associated method (_Lxx/_Exx) or a handler. Otherwise, the
         * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
         * first time it fires.
         */
        if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
                return_ACPI_STATUS(AE_NO_HANDLER);
        }

        /* Clear the GPE (of stale events) */

        status = acpi_hw_clear_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        /* Enable the requested GPE */

        status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);

        return_ACPI_STATUS(status);
}
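
/*
 * Usage sketch (hypothetical caller, not part of this file): a caller would
 * typically refresh the run-time enable mask first and treat AE_NO_HANDLER
 * as "no _Lxx/_Exx method and no handler will ever consume this GPE":
 *
 *      (void)acpi_ev_update_gpe_enable_mask(gpe_event_info);
 *
 *      status = acpi_ev_enable_gpe(gpe_event_info);
 *      if (status == AE_NO_HANDLER) {
 *              // Nothing to dispatch to - leave the GPE disabled.
 *      }
 */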

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_low_get_gpe_info
 *
 * PARAMETERS:  gpe_number          - Raw GPE number
 *              gpe_block           - A GPE info block
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE (The gpe_number
 *              is not within the specified GPE block)
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
 *              the low-level implementation of ev_get_gpe_event_info.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
                                                     struct acpi_gpe_block_info
                                                     *gpe_block)
{
        u32 gpe_index;

        /*
         * Validate that the gpe_number is within the specified gpe_block.
         * (Two steps)
         */
        if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
                return (NULL);
        }

        gpe_index = gpe_number - gpe_block->block_base_number;
        if (gpe_index >= gpe_block->gpe_count) {
                return (NULL);
        }

        return (&gpe_block->event_info[gpe_index]);
}
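
/*
 * Worked example (hypothetical block layout): for a GPE block with
 * block_base_number = 0x10 and gpe_count = 16 (two 8-bit registers):
 *
 *      gpe_number 0x1C -> gpe_index 0x0C -> &event_info[0x0C]   (valid)
 *      gpe_number 0x08 -> below block_base_number               -> NULL
 *      gpe_number 0x25 -> gpe_index 0x15 >= gpe_count (16)      -> NULL
 */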

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number
 *
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
                                                       u32 gpe_number)
{
        union acpi_operand_object *obj_desc;
        struct acpi_gpe_event_info *gpe_info;
        u32 i;

        ACPI_FUNCTION_ENTRY();

        /* A NULL gpe_device means use the FADT-defined GPE block(s) */

        if (!gpe_device) {

                /* Examine GPE Block 0 and 1 (These blocks are permanent) */

                for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
                        gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
                                                            acpi_gbl_gpe_fadt_blocks
                                                            [i]);
                        if (gpe_info) {
                                return (gpe_info);
                        }
                }

                /* The gpe_number was not in the range of either FADT GPE block */

                return (NULL);
        }

        /* A Non-NULL gpe_device means this is a GPE Block Device */

        obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
                                               gpe_device);
        if (!obj_desc || !obj_desc->device.gpe_block) {
                return (NULL);
        }

        return (acpi_ev_low_get_gpe_info(gpe_number,
                                         obj_desc->device.gpe_block));
}
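
/*
 * Usage sketch (hypothetical values): with a NULL gpe_device, a lookup of
 * GPE 0x13 walks the two permanent FADT blocks:
 *
 *      gpe_info = acpi_ev_get_gpe_event_info(NULL, 0x13);
 *      // tries acpi_gbl_gpe_fadt_blocks[0], then [1]; returns NULL if
 *      // 0x13 falls outside both ranges
 *
 * For a GPE block device, the caller passes the device's namespace node and
 * the block is taken from its attached operand object instead.
 */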

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. This function is
 *              executed at interrupt level.
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
{
        acpi_status status;
        struct acpi_gpe_block_info *gpe_block;
        struct acpi_gpe_register_info *gpe_register_info;
        u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
        u8 enabled_status_byte;
        u32 status_reg;
        u32 enable_reg;
        acpi_cpu_flags flags;
        u32 i;
        u32 j;

        ACPI_FUNCTION_NAME(ev_gpe_detect);

        /* Check for the case where there are no GPEs */

        if (!gpe_xrupt_list) {
                return (int_status);
        }

        /*
         * We need to obtain the GPE lock for both the data structs and registers
         * Note: Not necessary to obtain the hardware lock, since the GPE
         * registers are owned by the gpe_lock.
         */
        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

        /* Examine all GPE blocks attached to this interrupt level */

        gpe_block = gpe_xrupt_list->gpe_block_list_head;
        while (gpe_block) {
                /*
                 * Read all of the 8-bit GPE status and enable registers in this GPE
                 * block, saving all of them. Find all currently active GP events.
                 */
                for (i = 0; i < gpe_block->register_count; i++) {

                        /* Get the next status/enable pair */

                        gpe_register_info = &gpe_block->register_info[i];

                        /* Read the Status Register */

                        status =
                            acpi_hw_read(&status_reg,
                                         &gpe_register_info->status_address);
                        if (ACPI_FAILURE(status)) {
                                goto unlock_and_exit;
                        }

                        /* Read the Enable Register */

                        status =
                            acpi_hw_read(&enable_reg,
                                         &gpe_register_info->enable_address);
                        if (ACPI_FAILURE(status)) {
                                goto unlock_and_exit;
                        }

                        ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
                                          "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
                                          gpe_register_info->base_gpe_number,
                                          status_reg, enable_reg));

                        /* Check if there is anything active at all in this register */

                        enabled_status_byte = (u8) (status_reg & enable_reg);
                        if (!enabled_status_byte) {

                                /* No active GPEs in this register, move on */

                                continue;
                        }

                        /* Now look at the individual GPEs in this byte register */

                        for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

                                /* Examine one GPE bit */

                                if (enabled_status_byte & (1 << j)) {
                                        /*
                                         * Found an active GPE. Dispatch the event to a handler
                                         * or method.
                                         */
                                        int_status |=
                                            acpi_ev_gpe_dispatch(&gpe_block->
                                                event_info[((acpi_size) i *
                                                            ACPI_GPE_REGISTER_WIDTH) + j],
                                                j + gpe_register_info->base_gpe_number);
                                }
                        }
                }

                gpe_block = gpe_block->next;
        }

      unlock_and_exit:

        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
        return (int_status);
}
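
/*
 * Illustrative sketch (hypothetical register values): how the loop above
 * narrows one status/enable register pair down to the GPEs that must be
 * dispatched.
 *
 *      status_reg = 0x32;                  // GPEs 1, 4 and 5 are asserted
 *      enable_reg = 0x12;                  // only GPEs 1 and 4 are enabled
 *      enabled_status_byte = 0x32 & 0x12;  // = 0x12
 *
 *      // The j-loop then dispatches only bits 1 and 4 of this register,
 *      // i.e. GPE numbers base_gpe_number + 1 and base_gpe_number + 4.
 */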

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/

static void acpi_ev_asynch_enable_gpe(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
        struct acpi_gpe_event_info *gpe_event_info = (void *)context;
        acpi_status status;
        struct acpi_gpe_event_info local_gpe_event_info;
        struct acpi_evaluate_info *info;

        ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
        if (ACPI_FAILURE(status)) {
                return_VOID;
        }

        /* Must revalidate the gpe_number/gpe_block */

        if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
                status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
                return_VOID;
        }

        /*
         * Take a snapshot of the GPE info for this level - we copy the info to
         * prevent a race condition with remove_handler/remove_block.
         */
        ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
                    sizeof(struct acpi_gpe_event_info));

        status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
        if (ACPI_FAILURE(status)) {
                return_VOID;
        }

        /*
         * Must check for control method type dispatch one more time to avoid a
         * race with ev_gpe_install_handler
         */
        if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
            ACPI_GPE_DISPATCH_METHOD) {

                /* Allocate the evaluation information block */

                info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
                if (!info) {
                        status = AE_NO_MEMORY;
                } else {
                        /*
                         * Invoke the GPE Method (_Lxx, _Exx), i.e., evaluate the
                         * _Lxx/_Exx control method that corresponds to this GPE
                         */
                        info->prefix_node =
                            local_gpe_event_info.dispatch.method_node;
                        info->flags = ACPI_IGNORE_RETURN_VALUE;

                        status = acpi_ns_evaluate(info);
                        ACPI_FREE(info);
                }

                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "while evaluating GPE method [%4.4s]",
                                        acpi_ut_get_node_name
                                        (local_gpe_event_info.dispatch.
                                         method_node)));
                }
        }

        /* Defer enabling of GPE until all notify handlers are done */

        acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
                        gpe_event_info);
        return_VOID;
}

static void acpi_ev_asynch_enable_gpe(void *context)
{
        struct acpi_gpe_event_info *gpe_event_info = context;
        acpi_status status;

        if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
            ACPI_GPE_LEVEL_TRIGGERED) {
                /*
                 * GPE is level-triggered, so we clear the GPE status bit after
                 * handling the event.
                 */
                status = acpi_hw_clear_gpe(gpe_event_info);
                if (ACPI_FAILURE(status)) {
                        return_VOID;
                }
        }

        /*
         * Enable this GPE, conditionally. This means that the GPE will only be
         * physically enabled if the enable_for_run bit is set in the event_info.
         */
        (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);

        return_VOID;
}
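
/*
 * Call-flow sketch (summary of the asynchronous method path above):
 *
 *      acpi_ev_gpe_dispatch()                          // interrupt level
 *          -> acpi_hw_low_set_gpe(ACPI_GPE_DISABLE)    // silence the GPE
 *          -> acpi_os_execute(OSL_GPE_HANDLER,
 *                             acpi_ev_asynch_execute_gpe_method)
 *
 *      acpi_ev_asynch_execute_gpe_method()             // later, task context
 *          -> acpi_ns_evaluate() on the _Lxx/_Exx method node
 *          -> acpi_os_execute(OSL_NOTIFY_HANDLER,
 *                             acpi_ev_asynch_enable_gpe)
 *
 *      acpi_ev_asynch_enable_gpe()
 *          -> acpi_hw_clear_gpe()                      // level-triggered only
 *          -> acpi_hw_low_set_gpe(ACPI_GPE_COND_ENABLE)
 */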

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_event_info  - Info for this GPE
 *              gpe_number      - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 *              This function executes at interrupt level.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
        acpi_status status;

        ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

        acpi_os_gpe_count(gpe_number);

        /*
         * If edge-triggered, clear the GPE status bit now. Note that
         * level-triggered events are cleared after the GPE is serviced.
         */
        if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
            ACPI_GPE_EDGE_TRIGGERED) {
                status = acpi_hw_clear_gpe(gpe_event_info);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "Unable to clear GPE[0x%2X]",
                                        gpe_number));
                        return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
                }
        }

        /*
         * Dispatch the GPE to either an installed handler or the control method
         * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
         * it and do not attempt to run the method. If there is neither a handler
         * nor a method, we disable this GPE to prevent further such pointless
         * events from firing.
         */
        switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
        case ACPI_GPE_DISPATCH_HANDLER:

                /*
                 * Invoke the installed handler (at interrupt level)
                 * Ignore return status for now.
                 * TBD: leave GPE disabled on error?
                 */
                (void)gpe_event_info->dispatch.handler->
                    address(gpe_event_info->dispatch.handler->context);

                /* It is now safe to clear level-triggered events. */

                if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
                    ACPI_GPE_LEVEL_TRIGGERED) {
                        status = acpi_hw_clear_gpe(gpe_event_info);
                        if (ACPI_FAILURE(status)) {
                                ACPI_EXCEPTION((AE_INFO, status,
                                                "Unable to clear GPE[0x%2X]",
                                                gpe_number));
                                return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
                        }
                }
                break;

        case ACPI_GPE_DISPATCH_METHOD:

                /*
                 * Disable the GPE, so it doesn't keep firing before the method has a
                 * chance to run (it runs asynchronously with interrupts enabled).
                 */
                status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "Unable to disable GPE[0x%2X]",
                                        gpe_number));
                        return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
                }

                /*
                 * Execute the method associated with the GPE
                 * NOTE: Level-triggered GPEs are cleared after the method completes.
                 */
                status = acpi_os_execute(OSL_GPE_HANDLER,
                                         acpi_ev_asynch_execute_gpe_method,
                                         gpe_event_info);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "Unable to queue handler for GPE[0x%2X] - event disabled",
                                        gpe_number));
                }
                break;

        default:

                /*
                 * No handler or method to run!
                 * 03/2010: This case should no longer be possible. We will not allow
                 * a GPE to be enabled if it has no handler or method.
                 */
                ACPI_ERROR((AE_INFO,
                            "No handler or method for GPE[0x%2X], disabling event",
                            gpe_number));

                /*
                 * Disable the GPE. The GPE will remain disabled until a handler
                 * is installed or ACPICA is restarted.
                 */
                status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "Unable to disable GPE[0x%2X]",
                                        gpe_number));
                        return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
                }
                break;
        }

        return_UINT32(ACPI_INTERRUPT_HANDLED);
}
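
/*
 * Dispatch summary (hypothetical GPEs, for illustration only):
 *
 *      GPE 0x02 with an installed handler (e.g. the EC driver):
 *              handler->address(context) runs at interrupt level and a
 *              level-triggered GPE is cleared immediately afterwards.
 *
 *      GPE 0x13 with only an _L13 control method:
 *              the GPE is disabled, the method is queued for execution in
 *              task context, and acpi_ev_asynch_enable_gpe() conditionally
 *              re-enables the GPE once the method has completed.
 *
 *      GPE with neither handler nor method:
 *              reported as an error and left disabled.
 */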