drivers/acpi/acpica/evgpeutil.c
/******************************************************************************
 *
 * Module Name: evgpeutil - GPE utilities
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2013, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeutil")

#if (!ACPI_REDUCED_HARDWARE)    /* Entire module */
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_walk_gpe_list
 *
 * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
 *              context             - Value passed to callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Walk the GPE lists.
 *
 ******************************************************************************/
acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
{
        struct acpi_gpe_block_info *gpe_block;
        struct acpi_gpe_xrupt_info *gpe_xrupt_info;
        acpi_status status = AE_OK;
        acpi_cpu_flags flags;

        ACPI_FUNCTION_TRACE(ev_walk_gpe_list);

        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

        /* Walk the interrupt level descriptor list */

        gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
        while (gpe_xrupt_info) {

                /* Walk all Gpe Blocks attached to this interrupt level */

                gpe_block = gpe_xrupt_info->gpe_block_list_head;
                while (gpe_block) {

                        /* One callback per GPE block */

                        status =
                            gpe_walk_callback(gpe_xrupt_info, gpe_block,
                                              context);
                        if (ACPI_FAILURE(status)) {
                                if (status == AE_CTRL_END) {    /* Callback abort */
                                        status = AE_OK;
                                }
                                goto unlock_and_exit;
                        }

                        gpe_block = gpe_block->next;
                }

                gpe_xrupt_info = gpe_xrupt_info->next;
        }

      unlock_and_exit:
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
        return_ACPI_STATUS(status);
}
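
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * supplies a routine matching acpi_gpe_callback and lets
 * acpi_ev_walk_gpe_list visit every GPE block. The callback and counter
 * names below are hypothetical.
 *
 *   static acpi_status
 *   example_count_gpe_blocks(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 *                            struct acpi_gpe_block_info *gpe_block,
 *                            void *context)
 *   {
 *           u32 *block_count = context;
 *
 *           (*block_count)++;
 *           return (AE_OK);    (returning AE_CTRL_END would stop the walk)
 *   }
 *
 *   u32 count = 0;
 *   (void)acpi_ev_walk_gpe_list(example_count_gpe_blocks, &count);
 */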

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_valid_gpe_event
 *
 * PARAMETERS:  gpe_event_info              - Info for this GPE
 *
 * RETURN:      TRUE if the gpe_event is valid
 *
 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
{
        struct acpi_gpe_xrupt_info *gpe_xrupt_block;
        struct acpi_gpe_block_info *gpe_block;

        ACPI_FUNCTION_ENTRY();

        /* No need for spin lock since we are not changing any list elements */

        /* Walk the GPE interrupt levels */

        gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
        while (gpe_xrupt_block) {
                gpe_block = gpe_xrupt_block->gpe_block_list_head;

                /* Walk the GPE blocks on this interrupt level */

                while (gpe_block) {
                        if ((&gpe_block->event_info[0] <= gpe_event_info) &&
                            (&gpe_block->event_info[gpe_block->gpe_count] >
                             gpe_event_info)) {
                                return (TRUE);
                        }

                        gpe_block = gpe_block->next;
                }

                gpe_xrupt_block = gpe_xrupt_block->next;
        }

        return (FALSE);
}
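
/*
 * Illustrative usage sketch (not part of the original file): code that holds
 * a possibly stale struct acpi_gpe_event_info pointer can confirm it still
 * belongs to an installed GPE block before dereferencing it. Per the
 * description above, this must not be done at interrupt level.
 *
 *   if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
 *           return_ACPI_STATUS(AE_BAD_PARAMETER);
 *   }
 */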

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_device
 *
 * PARAMETERS:  GPE_WALK_CALLBACK
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
 *              block device. NULL if the GPE is one of the FADT-defined GPEs.
 *
 ******************************************************************************/

acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                       struct acpi_gpe_block_info *gpe_block, void *context)
{
        struct acpi_gpe_device_info *info = context;

        /* Increment Index by the number of GPEs in this block */

        info->next_block_base_index += gpe_block->gpe_count;

        if (info->index < info->next_block_base_index) {
                /*
                 * The GPE index is within this block, get the node. Leave the node
                 * NULL for the FADT-defined GPEs
                 */
                if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
                        info->gpe_device = gpe_block->node;
                }

                info->status = AE_OK;
                return (AE_CTRL_END);
        }

        return (AE_OK);
}
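
/*
 * Illustrative usage sketch (not part of the original file): this routine is
 * written as an acpi_gpe_callback, so it is normally driven through
 * acpi_ev_walk_gpe_list with a caller-initialized struct
 * acpi_gpe_device_info. The initialization below only mirrors the fields
 * consumed above; the variable gpe_index is hypothetical.
 *
 *   struct acpi_gpe_device_info info;
 *
 *   info.index = gpe_index;
 *   info.status = AE_NOT_EXIST;
 *   info.gpe_device = NULL;
 *   info.next_block_base_index = 0;
 *
 *   status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
 *   if (ACPI_SUCCESS(status) && ACPI_SUCCESS(info.status)) {
 *           gpe_device = info.gpe_device;    (NULL means a FADT-defined GPE)
 *   }
 */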

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 *
 * PARAMETERS:  interrupt_number     - Interrupt for a GPE block
 *
 * RETURN:      A GPE interrupt block
 *
 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
 *              block per unique interrupt level used for GPEs. Should be
 *              called only when the GPE lists are semaphore locked and not
 *              subject to change.
 *
 ******************************************************************************/

struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
{
        struct acpi_gpe_xrupt_info *next_gpe_xrupt;
        struct acpi_gpe_xrupt_info *gpe_xrupt;
        acpi_status status;
        acpi_cpu_flags flags;

        ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);

        /* No need for lock since we are not changing any list elements here */

        next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
        while (next_gpe_xrupt) {
                if (next_gpe_xrupt->interrupt_number == interrupt_number) {
                        return_PTR(next_gpe_xrupt);
                }

                next_gpe_xrupt = next_gpe_xrupt->next;
        }

        /* Not found, must allocate a new xrupt descriptor */

        gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
        if (!gpe_xrupt) {
                return_PTR(NULL);
        }

        gpe_xrupt->interrupt_number = interrupt_number;

        /* Install new interrupt descriptor with spin lock */

        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
        if (acpi_gbl_gpe_xrupt_list_head) {
                next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
                while (next_gpe_xrupt->next) {
                        next_gpe_xrupt = next_gpe_xrupt->next;
                }

                next_gpe_xrupt->next = gpe_xrupt;
                gpe_xrupt->previous = next_gpe_xrupt;
        } else {
                acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
        }
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

        /* Install new interrupt handler if not SCI_INT */

        if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
                status = acpi_os_install_interrupt_handler(interrupt_number,
                                                           acpi_ev_gpe_xrupt_handler,
                                                           gpe_xrupt);
                if (ACPI_FAILURE(status)) {
                        ACPI_ERROR((AE_INFO,
                                    "Could not install GPE interrupt handler at level 0x%X",
                                    interrupt_number));
                        return_PTR(NULL);
                }
        }

        return_PTR(gpe_xrupt);
}
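
/*
 * Illustrative usage sketch (not part of the original file): code installing
 * a GPE block for a given interrupt first obtains (or creates) the matching
 * interrupt descriptor, then links the new block into
 * gpe_xrupt_info->gpe_block_list_head while holding acpi_gbl_gpe_lock.
 *
 *   gpe_xrupt_info = acpi_ev_get_gpe_xrupt_block(interrupt_number);
 *   if (!gpe_xrupt_info) {
 *           return_ACPI_STATUS(AE_NO_MEMORY);
 *   }
 */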

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 *
 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
 *              interrupt handler if not the SCI interrupt.
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
        acpi_status status;
        acpi_cpu_flags flags;

        ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);

        /* We never want to remove the SCI interrupt handler */

        if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
                gpe_xrupt->gpe_block_list_head = NULL;
                return_ACPI_STATUS(AE_OK);
        }

        /* Disable this interrupt */

        status =
            acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
                                             acpi_ev_gpe_xrupt_handler);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        /* Unlink the interrupt block with lock */

        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
        if (gpe_xrupt->previous) {
                gpe_xrupt->previous->next = gpe_xrupt->next;
        } else {
                /* No previous, update list head */

                acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
        }

        if (gpe_xrupt->next) {
                gpe_xrupt->next->previous = gpe_xrupt->previous;
        }
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

        /* Free the block */

        ACPI_FREE(gpe_xrupt);
        return_ACPI_STATUS(AE_OK);
}
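
/*
 * Illustrative usage sketch (not part of the original file): once the last
 * GPE block on a non-SCI interrupt has been removed, the now-empty
 * descriptor can be torn down. The surrounding check is an assumption about
 * the caller; only the call itself comes from this file's interface.
 *
 *   if (!gpe_xrupt_info->gpe_block_list_head) {
 *           status = acpi_ev_delete_gpe_xrupt(gpe_xrupt_info);
 *           if (ACPI_FAILURE(status)) {
 *                   return_ACPI_STATUS(status);
 *           }
 *   }
 */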

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_handlers
 *
 * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
 *              gpe_block           - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
 *              Used only prior to termination.
 *
 ******************************************************************************/

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                            struct acpi_gpe_block_info *gpe_block,
                            void *context)
{
        struct acpi_gpe_event_info *gpe_event_info;
        struct acpi_gpe_notify_info *notify;
        struct acpi_gpe_notify_info *next;
        u32 i;
        u32 j;

        ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);

        /* Examine each GPE Register within the block */

        for (i = 0; i < gpe_block->register_count; i++) {

                /* Now look at the individual GPEs in this byte register */

                for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
                        gpe_event_info = &gpe_block->event_info[((acpi_size) i *
                                                                 ACPI_GPE_REGISTER_WIDTH)
                                                                + j];

                        if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
                            ACPI_GPE_DISPATCH_HANDLER) {

                                /* Delete an installed handler block */

                                ACPI_FREE(gpe_event_info->dispatch.handler);
                                gpe_event_info->dispatch.handler = NULL;
                                gpe_event_info->flags &=
                                    ~ACPI_GPE_DISPATCH_MASK;
                        } else if ((gpe_event_info->flags &
                                    ACPI_GPE_DISPATCH_MASK) ==
                                   ACPI_GPE_DISPATCH_NOTIFY) {

                                /* Delete the implicit notification device list */

                                notify = gpe_event_info->dispatch.notify_list;
                                while (notify) {
                                        next = notify->next;
                                        ACPI_FREE(notify);
                                        notify = next;
                                }
                                gpe_event_info->dispatch.notify_list = NULL;
                                gpe_event_info->flags &=
                                    ~ACPI_GPE_DISPATCH_MASK;
                        }
                }
        }

        return_ACPI_STATUS(AE_OK);
}
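
/*
 * Illustrative usage sketch (not part of the original file): like
 * acpi_ev_get_gpe_device, this routine is an acpi_gpe_callback, so shutdown
 * code can apply it to every registered GPE block in a single pass. The
 * context argument is unused here, so NULL is sufficient.
 *
 *   status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
 */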

#endif                          /* !ACPI_REDUCED_HARDWARE */