1 /*
2  * linux/kernel/irq/manage.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006 Thomas Gleixner
6  *
7  * This file contains driver APIs to the irq subsystem.
8  */
9
10 #define pr_fmt(fmt) "genirq: " fmt
11
12 #include <linux/irq.h>
13 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/random.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/task_work.h>
20
21 #include "internals.h"
22
23 #ifdef CONFIG_IRQ_FORCED_THREADING
24 __read_mostly bool force_irqthreads;
25
26 static int __init setup_forced_irqthreads(char *arg)
27 {
28         force_irqthreads = true;
29         return 0;
30 }
31 early_param("threadirqs", setup_forced_irqthreads);
32 #endif
33
34 /**
35  *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
36  *      @irq: interrupt number to wait for
37  *
38  *      This function waits for any pending IRQ handlers for this interrupt
39  *      to complete before returning. If you use this function while
40  *      holding a resource the IRQ handler may need, you will deadlock.
41  *
42  *      This function may be called - with care - from IRQ context.
43  */
44 void synchronize_irq(unsigned int irq)
45 {
46         struct irq_desc *desc = irq_to_desc(irq);
47         bool inprogress;
48
49         if (!desc)
50                 return;
51
52         do {
53                 unsigned long flags;
54
55                 /*
56                  * Wait until we're out of the critical section.  This might
57                  * give the wrong answer due to the lack of memory barriers.
58                  */
59                 while (irqd_irq_inprogress(&desc->irq_data))
60                         cpu_relax();
61
62                 /* Ok, that indicated we're done: double-check carefully. */
63                 raw_spin_lock_irqsave(&desc->lock, flags);
64                 inprogress = irqd_irq_inprogress(&desc->irq_data);
65                 raw_spin_unlock_irqrestore(&desc->lock, flags);
66
67                 /* Oops, that failed? */
68         } while (inprogress);
69
70         /*
71          * We made sure that no hardirq handler is running. Now verify
72          * that no threaded handlers are active.
73          */
74         wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
75 }
76 EXPORT_SYMBOL(synchronize_irq);
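/*
 * Illustrative sketch ("my_dev" and its members are hypothetical):
 * synchronize_irq() is typically called after the device has been told
 * to stop raising interrupts, so that in-flight handlers drain before
 * shared state is freed.
 */
#if 0	/* example only */
static void my_dev_teardown(struct my_dev *dev)
{
	/* Tell the hardware to stop generating interrupts first. */
	writel(0, dev->regs + MY_DEV_IRQ_ENABLE);

	/* Wait for any handler still running on another CPU. */
	synchronize_irq(dev->irq);

	/* Now it is safe to free data the handler dereferences. */
	kfree(dev->dma_ring);
}
#endif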
77
78 #ifdef CONFIG_SMP
79 cpumask_var_t irq_default_affinity;
80
81 /**
82  *      irq_can_set_affinity - Check if the affinity of a given irq can be set
83  *      @irq:           Interrupt to check
84  *
85  */
86 int irq_can_set_affinity(unsigned int irq)
87 {
88         struct irq_desc *desc = irq_to_desc(irq);
89
90         if (!desc || !irqd_can_balance(&desc->irq_data) ||
91             !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
92                 return 0;
93
94         return 1;
95 }
96
97 /**
98  *      irq_set_thread_affinity - Notify irq threads to adjust affinity
99  *      @desc:          irq descriptor which has affinity changed
100  *
101  *      We just set IRQTF_AFFINITY and delegate the affinity setting
102  *      to the interrupt thread itself. We can not call
103  *      set_cpus_allowed_ptr() here as we hold desc->lock and this
104  *      code can be called from hard interrupt context.
105  */
106 void irq_set_thread_affinity(struct irq_desc *desc)
107 {
108         struct irqaction *action = desc->action;
109
110         while (action) {
111                 if (action->thread)
112                         set_bit(IRQTF_AFFINITY, &action->thread_flags);
113                 action = action->next;
114         }
115 }
116
117 #ifdef CONFIG_GENERIC_PENDING_IRQ
118 static inline bool irq_can_move_pcntxt(struct irq_data *data)
119 {
120         return irqd_can_move_in_process_context(data);
121 }
122 static inline bool irq_move_pending(struct irq_data *data)
123 {
124         return irqd_is_setaffinity_pending(data);
125 }
126 static inline void
127 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
128 {
129         cpumask_copy(desc->pending_mask, mask);
130 }
131 static inline void
132 irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
133 {
134         cpumask_copy(mask, desc->pending_mask);
135 }
136 #else
137 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
138 static inline bool irq_move_pending(struct irq_data *data) { return false; }
139 static inline void
140 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
141 static inline void
142 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
143 #endif
144
145 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
146                         bool force)
147 {
148         struct irq_desc *desc = irq_data_to_desc(data);
149         struct irq_chip *chip = irq_data_get_irq_chip(data);
150         int ret;
151
152         ret = chip->irq_set_affinity(data, mask, false);
153         switch (ret) {
154         case IRQ_SET_MASK_OK:
155                 cpumask_copy(data->affinity, mask);
156         case IRQ_SET_MASK_OK_NOCOPY:
157                 irq_set_thread_affinity(desc);
158                 ret = 0;
159         }
160
161         return ret;
162 }
163
164 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
165 {
166         struct irq_chip *chip = irq_data_get_irq_chip(data);
167         struct irq_desc *desc = irq_data_to_desc(data);
168         int ret = 0;
169
170         if (!chip || !chip->irq_set_affinity)
171                 return -EINVAL;
172
173         if (irq_can_move_pcntxt(data)) {
174                 ret = irq_do_set_affinity(data, mask, false);
175         } else {
176                 irqd_set_move_pending(data);
177                 irq_copy_pending(desc, mask);
178         }
179
180         if (desc->affinity_notify) {
181                 kref_get(&desc->affinity_notify->kref);
182                 schedule_work(&desc->affinity_notify->work);
183         }
184         irqd_set(data, IRQD_AFFINITY_SET);
185
186         return ret;
187 }
188
189 /**
190  *      irq_set_affinity - Set the irq affinity of a given irq
191  *      @irq:           Interrupt to set affinity
192  *      @mask:          cpumask
193  *
194  */
195 int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
196 {
197         struct irq_desc *desc = irq_to_desc(irq);
198         unsigned long flags;
199         int ret;
200
201         if (!desc)
202                 return -EINVAL;
203
204         raw_spin_lock_irqsave(&desc->lock, flags);
205         ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
206         raw_spin_unlock_irqrestore(&desc->lock, flags);
207         return ret;
208 }
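/*
 * Illustrative sketch (hypothetical irq number): pinning an interrupt
 * to a single CPU. cpumask_of() yields a constant mask containing just
 * that CPU.
 */
#if 0	/* example only */
static int pin_irq_to_cpu0(unsigned int irq)
{
	return irq_set_affinity(irq, cpumask_of(0));
}
#endif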
209
210 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
211 {
212         unsigned long flags;
213         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
214
215         if (!desc)
216                 return -EINVAL;
217         desc->affinity_hint = m;
218         irq_put_desc_unlock(desc, flags);
219         return 0;
220 }
221 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
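/*
 * Illustrative sketch (hypothetical driver): the hint is advisory,
 * consumed by userspace balancers via /proc/irq/<n>/affinity_hint, and
 * must be cleared before the irq is freed since __free_irq() warns on
 * a stale hint.
 */
#if 0	/* example only */
static int my_dev_probe_hint(struct my_dev *dev)
{
	/* Suggest the queue's home CPU to userspace irq balancers: */
	return irq_set_affinity_hint(dev->irq, cpumask_of(dev->home_cpu));
}

static void my_dev_remove_hint(struct my_dev *dev)
{
	/* Clear the hint before calling free_irq(): */
	irq_set_affinity_hint(dev->irq, NULL);
}
#endif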
222
223 static void irq_affinity_notify(struct work_struct *work)
224 {
225         struct irq_affinity_notify *notify =
226                 container_of(work, struct irq_affinity_notify, work);
227         struct irq_desc *desc = irq_to_desc(notify->irq);
228         cpumask_var_t cpumask;
229         unsigned long flags;
230
231         if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
232                 goto out;
233
234         raw_spin_lock_irqsave(&desc->lock, flags);
235         if (irq_move_pending(&desc->irq_data))
236                 irq_get_pending(cpumask, desc);
237         else
238                 cpumask_copy(cpumask, desc->irq_data.affinity);
239         raw_spin_unlock_irqrestore(&desc->lock, flags);
240
241         notify->notify(notify, cpumask);
242
243         free_cpumask_var(cpumask);
244 out:
245         kref_put(&notify->kref, notify->release);
246 }
247
248 /**
249  *      irq_set_affinity_notifier - control notification of IRQ affinity changes
250  *      @irq:           Interrupt for which to enable/disable notification
251  *      @notify:        Context for notification, or %NULL to disable
252  *                      notification.  Function pointers must be initialised;
253  *                      the other fields will be initialised by this function.
254  *
255  *      Must be called in process context.  Notification may only be enabled
256  *      after the IRQ is allocated and must be disabled before the IRQ is
257  *      freed using free_irq().
258  */
259 int
260 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
261 {
262         struct irq_desc *desc = irq_to_desc(irq);
263         struct irq_affinity_notify *old_notify;
264         unsigned long flags;
265
266         /* The release function is promised process context */
267         might_sleep();
268
269         if (!desc)
270                 return -EINVAL;
271
272         /* Complete initialisation of *notify */
273         if (notify) {
274                 notify->irq = irq;
275                 kref_init(&notify->kref);
276                 INIT_WORK(&notify->work, irq_affinity_notify);
277         }
278
279         raw_spin_lock_irqsave(&desc->lock, flags);
280         old_notify = desc->affinity_notify;
281         desc->affinity_notify = notify;
282         raw_spin_unlock_irqrestore(&desc->lock, flags);
283
284         if (old_notify)
285                 kref_put(&old_notify->kref, old_notify->release);
286
287         return 0;
288 }
289 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
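/*
 * Illustrative sketch (hypothetical callbacks): a driver fills in
 * ->notify and ->release; irq_set_affinity_notifier() completes the
 * rest. ->release runs in process context when the last kref is
 * dropped.
 */
#if 0	/* example only */
static void my_notify(struct irq_affinity_notify *notify,
		      const cpumask_t *mask)
{
	/* Re-steer per-CPU resources to follow the new affinity. */
}

static void my_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);
	/* Free or drop whatever structure owns @notify. */
}

static int my_dev_watch_affinity(unsigned int irq,
				 struct irq_affinity_notify *notify)
{
	notify->notify = my_notify;
	notify->release = my_release;
	return irq_set_affinity_notifier(irq, notify);
}
#endif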
290
291 #ifndef CONFIG_AUTO_IRQ_AFFINITY
292 /*
293  * Generic version of the affinity autoselector.
294  */
295 static int
296 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
297 {
298         struct cpumask *set = irq_default_affinity;
299         int node = desc->irq_data.node;
300
301         /* Excludes PER_CPU and NO_BALANCE interrupts */
302         if (!irq_can_set_affinity(irq))
303                 return 0;
304
305         /*
306          * Preserve a userspace affinity setup, but make sure that
307          * one of the targets is online.
308          */
309         if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
310                 if (cpumask_intersects(desc->irq_data.affinity,
311                                        cpu_online_mask))
312                         set = desc->irq_data.affinity;
313                 else
314                         irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
315         }
316
317         cpumask_and(mask, cpu_online_mask, set);
318         if (node != NUMA_NO_NODE) {
319                 const struct cpumask *nodemask = cpumask_of_node(node);
320
321                 /* make sure at least one of the cpus in nodemask is online */
322                 if (cpumask_intersects(mask, nodemask))
323                         cpumask_and(mask, mask, nodemask);
324         }
325         irq_do_set_affinity(&desc->irq_data, mask, false);
326         return 0;
327 }
328 #else
329 static inline int
330 setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
331 {
332         return irq_select_affinity(irq);
333 }
334 #endif
335
336 /*
337  * Called when affinity is set via /proc/irq
338  */
339 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
340 {
341         struct irq_desc *desc = irq_to_desc(irq);
342         unsigned long flags;
343         int ret;
344
345         raw_spin_lock_irqsave(&desc->lock, flags);
346         ret = setup_affinity(irq, desc, mask);
347         raw_spin_unlock_irqrestore(&desc->lock, flags);
348         return ret;
349 }
350
351 #else
352 static inline int
353 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
354 {
355         return 0;
356 }
357 #endif
358
359 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
360 {
361         if (suspend) {
362                 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
363                         return;
364                 desc->istate |= IRQS_SUSPENDED;
365         }
366
367         if (!desc->depth++)
368                 irq_disable(desc);
369 }
370
371 static int __disable_irq_nosync(unsigned int irq)
372 {
373         unsigned long flags;
374         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
375
376         if (!desc)
377                 return -EINVAL;
378         __disable_irq(desc, irq, false);
379         irq_put_desc_busunlock(desc, flags);
380         return 0;
381 }
382
383 /**
384  *      disable_irq_nosync - disable an irq without waiting
385  *      @irq: Interrupt to disable
386  *
387  *      Disable the selected interrupt line.  Disables and Enables are
388  *      nested.
389  *      Unlike disable_irq(), this function does not ensure existing
390  *      instances of the IRQ handler have completed before returning.
391  *
392  *      This function may be called from IRQ context.
393  */
394 void disable_irq_nosync(unsigned int irq)
395 {
396         __disable_irq_nosync(irq);
397 }
398 EXPORT_SYMBOL(disable_irq_nosync);
399
400 /**
401  *      disable_irq - disable an irq and wait for completion
402  *      @irq: Interrupt to disable
403  *
404  *      Disable the selected interrupt line.  Enables and Disables are
405  *      nested.
406  *      This function waits for any pending IRQ handlers for this interrupt
407  *      to complete before returning. If you use this function while
408  *      holding a resource the IRQ handler may need, you will deadlock.
409  *
410  *      This function may be called - with care - from IRQ context.
411  */
412 void disable_irq(unsigned int irq)
413 {
414         if (!__disable_irq_nosync(irq))
415                 synchronize_irq(irq);
416 }
417 EXPORT_SYMBOL(disable_irq);
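/*
 * Illustrative sketch (hypothetical driver): because disables and
 * enables nest, a balanced pair safely brackets a reconfiguration even
 * if other code has disabled the line as well.
 */
#if 0	/* example only */
static void my_dev_reconfigure(struct my_dev *dev)
{
	disable_irq(dev->irq);		/* waits for running handlers */
	my_dev_rewrite_registers(dev);
	enable_irq(dev->irq);		/* must balance the disable */
}
#endif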
418
419 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
420 {
421         if (resume) {
422                 if (!(desc->istate & IRQS_SUSPENDED)) {
423                         if (!desc->action)
424                                 return;
425                         if (!(desc->action->flags & IRQF_FORCE_RESUME))
426                                 return;
427                         /* Pretend that it got disabled ! */
428                         desc->depth++;
429                 }
430                 desc->istate &= ~IRQS_SUSPENDED;
431         }
432
433         switch (desc->depth) {
434         case 0:
435  err_out:
436                 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
437                 break;
438         case 1: {
439                 if (desc->istate & IRQS_SUSPENDED)
440                         goto err_out;
441                 /* Prevent probing on this irq: */
442                 irq_settings_set_noprobe(desc);
443                 irq_enable(desc);
444                 check_irq_resend(desc, irq);
445                 /* fall-through */
446         }
447         default:
448                 desc->depth--;
449         }
450 }
451
452 /**
453  *      enable_irq - enable handling of an irq
454  *      @irq: Interrupt to enable
455  *
456  *      Undoes the effect of one call to disable_irq().  If this
457  *      matches the last disable, processing of interrupts on this
458  *      IRQ line is re-enabled.
459  *
460  *      This function may be called from IRQ context only when
461  *      desc->irq_data.chip->irq_bus_lock and irq_bus_sync_unlock are NULL !
462  */
463 void enable_irq(unsigned int irq)
464 {
465         unsigned long flags;
466         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
467
468         if (!desc)
469                 return;
470         if (WARN(!desc->irq_data.chip,
471                  KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
472                 goto out;
473
474         __enable_irq(desc, irq, false);
475 out:
476         irq_put_desc_busunlock(desc, flags);
477 }
478 EXPORT_SYMBOL(enable_irq);
479
480 static int set_irq_wake_real(unsigned int irq, unsigned int on)
481 {
482         struct irq_desc *desc = irq_to_desc(irq);
483         int ret = -ENXIO;
484
485         if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
486                 return 0;
487
488         if (desc->irq_data.chip->irq_set_wake)
489                 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
490
491         return ret;
492 }
493
494 /**
495  *      irq_set_irq_wake - control irq power management wakeup
496  *      @irq:   interrupt to control
497  *      @on:    enable/disable power management wakeup
498  *
499  *      Enable/disable power management wakeup mode, which is
500  *      disabled by default.  Enables and disables must match,
501  *      just as they match for non-wakeup mode support.
502  *
503  *      Wakeup mode lets this IRQ wake the system from sleep
504  *      states like "suspend to RAM".
505  */
506 int irq_set_irq_wake(unsigned int irq, unsigned int on)
507 {
508         unsigned long flags;
509         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
510         int ret = 0;
511
512         if (!desc)
513                 return -EINVAL;
514
515         /* wakeup-capable irqs can be shared between drivers that
516          * don't need to have the same sleep mode behaviors.
517          */
518         if (on) {
519                 if (desc->wake_depth++ == 0) {
520                         ret = set_irq_wake_real(irq, on);
521                         if (ret)
522                                 desc->wake_depth = 0;
523                         else
524                                 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
525                 }
526         } else {
527                 if (desc->wake_depth == 0) {
528                         WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
529                 } else if (--desc->wake_depth == 0) {
530                         ret = set_irq_wake_real(irq, on);
531                         if (ret)
532                                 desc->wake_depth = 1;
533                         else
534                                 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
535                 }
536         }
537         irq_put_desc_busunlock(desc, flags);
538         return ret;
539 }
540 EXPORT_SYMBOL(irq_set_irq_wake);
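/*
 * Illustrative sketch (hypothetical driver data): a common dev_pm_ops
 * pattern arming the interrupt as a wakeup source across suspend. The
 * enable in suspend must be balanced by the disable in resume, as the
 * comment above requires.
 */
#if 0	/* example only */
static int my_dev_suspend(struct device *dev)
{
	struct my_dev *md = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(md->irq, 1);
	return 0;
}

static int my_dev_resume(struct device *dev)
{
	struct my_dev *md = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(md->irq, 0);	/* balance the enable */
	return 0;
}
#endif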
541
542 /*
543  * Internal function that tells the architecture code whether a
544  * particular irq has been exclusively allocated or is available
545  * for driver use.
546  */
547 int can_request_irq(unsigned int irq, unsigned long irqflags)
548 {
549         unsigned long flags;
550         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
551         int canrequest = 0;
552
553         if (!desc)
554                 return 0;
555
556         if (irq_settings_can_request(desc)) {
557                 if (desc->action)
558                         if (irqflags & desc->action->flags & IRQF_SHARED)
559                                 canrequest = 1;
560         }
561         irq_put_desc_unlock(desc, flags);
562         return canrequest;
563 }
564
565 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
566                       unsigned long flags)
567 {
568         struct irq_chip *chip = desc->irq_data.chip;
569         int ret, unmask = 0;
570
571         if (!chip || !chip->irq_set_type) {
572                 /*
573                  * IRQF_TRIGGER_* but the PIC does not support multiple
574                  * flow-types?
575                  */
576                 pr_debug("No set_type function for IRQ %d (%s)\n", irq,
577                          chip ? (chip->name ? : "unknown") : "unknown");
578                 return 0;
579         }
580
581         flags &= IRQ_TYPE_SENSE_MASK;
582
583         if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
584                 if (!irqd_irq_masked(&desc->irq_data))
585                         mask_irq(desc);
586                 if (!irqd_irq_disabled(&desc->irq_data))
587                         unmask = 1;
588         }
589
590         /* caller masked out all except trigger mode flags */
591         ret = chip->irq_set_type(&desc->irq_data, flags);
592
593         switch (ret) {
594         case IRQ_SET_MASK_OK:
595                 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
596                 irqd_set(&desc->irq_data, flags);
597
598         case IRQ_SET_MASK_OK_NOCOPY:
599                 flags = irqd_get_trigger_type(&desc->irq_data);
600                 irq_settings_set_trigger_mask(desc, flags);
601                 irqd_clear(&desc->irq_data, IRQD_LEVEL);
602                 irq_settings_clr_level(desc);
603                 if (flags & IRQ_TYPE_LEVEL_MASK) {
604                         irq_settings_set_level(desc);
605                         irqd_set(&desc->irq_data, IRQD_LEVEL);
606                 }
607
608                 ret = 0;
609                 break;
610         default:
611                 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
612                        flags, irq, chip->irq_set_type);
613         }
614         if (unmask)
615                 unmask_irq(desc);
616         return ret;
617 }
618
619 /*
620  * Default primary interrupt handler for threaded interrupts. Is
621  * assigned as primary handler when request_threaded_irq is called
622  * with handler == NULL. Useful for oneshot interrupts.
623  */
624 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
625 {
626         return IRQ_WAKE_THREAD;
627 }
628
629 /*
630  * Primary handler for nested threaded interrupts. Should never be
631  * called.
632  */
633 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
634 {
635         WARN(1, "Primary handler called for nested irq %d\n", irq);
636         return IRQ_NONE;
637 }
638
639 static int irq_wait_for_interrupt(struct irqaction *action)
640 {
641         set_current_state(TASK_INTERRUPTIBLE);
642
643         while (!kthread_should_stop()) {
644
645                 if (test_and_clear_bit(IRQTF_RUNTHREAD,
646                                        &action->thread_flags)) {
647                         __set_current_state(TASK_RUNNING);
648                         return 0;
649                 }
650                 schedule();
651                 set_current_state(TASK_INTERRUPTIBLE);
652         }
653         __set_current_state(TASK_RUNNING);
654         return -1;
655 }
656
657 /*
658  * Oneshot interrupts keep the irq line masked until the threaded
659  * handler has finished. Unmask if the interrupt has not been disabled and
660  * is marked MASKED.
661  */
662 static void irq_finalize_oneshot(struct irq_desc *desc,
663                                  struct irqaction *action)
664 {
665         if (!(desc->istate & IRQS_ONESHOT))
666                 return;
667 again:
668         chip_bus_lock(desc);
669         raw_spin_lock_irq(&desc->lock);
670
671         /*
672          * Implausible though it may be, we need to protect ourselves
673          * against the following scenario:
674          *
675          * The thread finishes before the hard interrupt handler on the
676          * other CPU. If we then unmask the irq line, the interrupt can
677          * come in again, mask the line and bail out due to
678          * IRQS_INPROGRESS, leaving the irq line masked forever.
679          *
680          * This also serializes the state of shared oneshot handlers
681          * versus "desc->threads_oneshot |= action->thread_mask;" in
682          * irq_wake_thread(). See the comment there which explains the
683          * serialization.
684          */
685         if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
686                 raw_spin_unlock_irq(&desc->lock);
687                 chip_bus_sync_unlock(desc);
688                 cpu_relax();
689                 goto again;
690         }
691
692         /*
693          * Now check again whether the thread should run. Otherwise
694          * we would clear the threads_oneshot bit of this thread which
695          * was just set.
696          */
697         if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
698                 goto out_unlock;
699
700         desc->threads_oneshot &= ~action->thread_mask;
701
702         if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
703             irqd_irq_masked(&desc->irq_data))
704                 unmask_irq(desc);
705
706 out_unlock:
707         raw_spin_unlock_irq(&desc->lock);
708         chip_bus_sync_unlock(desc);
709 }
710
711 #ifdef CONFIG_SMP
712 /*
713  * Check whether we need to change the affinity of the interrupt thread.
714  */
715 static void
716 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
717 {
718         cpumask_var_t mask;
719
720         if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
721                 return;
722
723         /*
724          * In case we are out of memory, we set IRQTF_AFFINITY again
725          * and try again next time.
726          */
727         if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
728                 set_bit(IRQTF_AFFINITY, &action->thread_flags);
729                 return;
730         }
731
732         raw_spin_lock_irq(&desc->lock);
733         cpumask_copy(mask, desc->irq_data.affinity);
734         raw_spin_unlock_irq(&desc->lock);
735
736         set_cpus_allowed_ptr(current, mask);
737         free_cpumask_var(mask);
738 }
739 #else
740 static inline void
741 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
742 #endif
743
744 /*
745  * Interrupts which are not explicitly requested as threaded
746  * interrupts rely on the implicit bh/preempt disable of the hard irq
747  * context. So we need to disable bh here to avoid deadlocks and other
748  * side effects.
749  */
750 static irqreturn_t
751 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
752 {
753         irqreturn_t ret;
754
755         local_bh_disable();
756         ret = action->thread_fn(action->irq, action->dev_id);
757         irq_finalize_oneshot(desc, action);
758         local_bh_enable();
759         return ret;
760 }
761
762 /*
763  * Interrupts explicitly requested as threaded interrupts want to be
764  * preemptible - many of them need to sleep and wait for slow buses to
765  * complete.
766  */
767 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
768                 struct irqaction *action)
769 {
770         irqreturn_t ret;
771
772         ret = action->thread_fn(action->irq, action->dev_id);
773         irq_finalize_oneshot(desc, action);
774         return ret;
775 }
776
777 static void wake_threads_waitq(struct irq_desc *desc)
778 {
779         if (atomic_dec_and_test(&desc->threads_active) &&
780             waitqueue_active(&desc->wait_for_threads))
781                 wake_up(&desc->wait_for_threads);
782 }
783
784 static void irq_thread_dtor(struct callback_head *unused)
785 {
786         struct task_struct *tsk = current;
787         struct irq_desc *desc;
788         struct irqaction *action;
789
790         if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
791                 return;
792
793         action = kthread_data(tsk);
794
795         pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
796                tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
797
798
799         desc = irq_to_desc(action->irq);
800         /*
801          * If IRQTF_RUNTHREAD is set, we need to decrement
802          * desc->threads_active and wake possible waiters.
803          */
804         if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
805                 wake_threads_waitq(desc);
806
807         /* Prevent a stale desc->threads_oneshot */
808         irq_finalize_oneshot(desc, action);
809 }
810
811 /*
812  * Interrupt handler thread
813  */
814 static int irq_thread(void *data)
815 {
816         struct callback_head on_exit_work;
817         static const struct sched_param param = {
818                 .sched_priority = MAX_USER_RT_PRIO/2,
819         };
820         struct irqaction *action = data;
821         struct irq_desc *desc = irq_to_desc(action->irq);
822         irqreturn_t (*handler_fn)(struct irq_desc *desc,
823                         struct irqaction *action);
824
825         if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
826                                         &action->thread_flags))
827                 handler_fn = irq_forced_thread_fn;
828         else
829                 handler_fn = irq_thread_fn;
830
831         sched_setscheduler(current, SCHED_FIFO, &param);
832
833         init_task_work(&on_exit_work, irq_thread_dtor);
834         task_work_add(current, &on_exit_work, false);
835
836         while (!irq_wait_for_interrupt(action)) {
837                 irqreturn_t action_ret;
838
839                 irq_thread_check_affinity(desc, action);
840
841                 action_ret = handler_fn(desc, action);
842                 if (!noirqdebug)
843                         note_interrupt(action->irq, desc, action_ret);
844
845                 wake_threads_waitq(desc);
846         }
847
848         /*
849          * This is the regular exit path. __free_irq() is stopping the
850          * thread via kthread_stop() after calling
851          * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
852          * oneshot mask bit can be set. We cannot verify that as we
853          * cannot touch the oneshot mask at this point anymore as
854          * __setup_irq() might have given out current's thread_mask
855          * again.
856          */
857         task_work_cancel(current, irq_thread_dtor);
858         return 0;
859 }
860
861 static void irq_setup_forced_threading(struct irqaction *new)
862 {
863         if (!force_irqthreads)
864                 return;
865         if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
866                 return;
867
868         new->flags |= IRQF_ONESHOT;
869
870         if (!new->thread_fn) {
871                 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
872                 new->thread_fn = new->handler;
873                 new->handler = irq_default_primary_handler;
874         }
875 }
876
877 /*
878  * Internal function to register an irqaction - typically used to
879  * allocate special interrupts that are part of the architecture.
880  */
881 static int
882 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
883 {
884         struct irqaction *old, **old_ptr;
885         unsigned long flags, thread_mask = 0;
886         int ret, nested, shared = 0;
887         cpumask_var_t mask;
888
889         if (!desc)
890                 return -EINVAL;
891
892         if (desc->irq_data.chip == &no_irq_chip)
893                 return -ENOSYS;
894         if (!try_module_get(desc->owner))
895                 return -ENODEV;
896
897         /*
898          * Check whether the interrupt nests into another interrupt
899          * thread.
900          */
901         nested = irq_settings_is_nested_thread(desc);
902         if (nested) {
903                 if (!new->thread_fn) {
904                         ret = -EINVAL;
905                         goto out_mput;
906                 }
907                 /*
908                  * Replace the primary handler, which the driver
909                  * provided for non-nested interrupt handling, with the
910                  * dummy function that warns when called.
911                  */
912                 new->handler = irq_nested_primary_handler;
913         } else {
914                 if (irq_settings_can_thread(desc))
915                         irq_setup_forced_threading(new);
916         }
917
918         /*
919          * Create a handler thread when a thread function is supplied
920          * and the interrupt does not nest into another interrupt
921          * thread.
922          */
923         if (new->thread_fn && !nested) {
924                 struct task_struct *t;
925
926                 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
927                                    new->name);
928                 if (IS_ERR(t)) {
929                         ret = PTR_ERR(t);
930                         goto out_mput;
931                 }
932                 /*
933                  * We keep the reference to the task struct even if
934                  * the thread dies, so that the interrupt code never
935                  * references an already freed task_struct.
936                  */
937                 get_task_struct(t);
938                 new->thread = t;
939         }
940
941         if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
942                 ret = -ENOMEM;
943                 goto out_thread;
944         }
945
946         /*
947          * The following block of code has to be executed atomically
948          */
949         raw_spin_lock_irqsave(&desc->lock, flags);
950         old_ptr = &desc->action;
951         old = *old_ptr;
952         if (old) {
953                 /*
954                  * Can't share interrupts unless both agree to and are
955                  * the same type (level, edge, polarity). So both flag
956                  * fields must have IRQF_SHARED set and the bits which
957                  * set the trigger type must match. Also all must
958                  * agree on ONESHOT.
959                  */
960                 if (!((old->flags & new->flags) & IRQF_SHARED) ||
961                     ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
962                     ((old->flags ^ new->flags) & IRQF_ONESHOT))
963                         goto mismatch;
964
965                 /* All handlers must agree on per-cpuness */
966                 if ((old->flags & IRQF_PERCPU) !=
967                     (new->flags & IRQF_PERCPU))
968                         goto mismatch;
969
970                 /* add new interrupt at end of irq queue */
971                 do {
972                         /*
973                          * Or all existing action->thread_mask bits,
974                          * so we can find the next zero bit for this
975                          * new action.
976                          */
977                         thread_mask |= old->thread_mask;
978                         old_ptr = &old->next;
979                         old = *old_ptr;
980                 } while (old);
981                 shared = 1;
982         }
983
984         /*
985          * Setup the thread mask for this irqaction for ONESHOT. For
986          * !ONESHOT irqs the thread mask is 0 so we can avoid a
987          * conditional in irq_wake_thread().
988          */
989         if (new->flags & IRQF_ONESHOT) {
990                 /*
991                  * Unlikely to have 32 (or 64, on 64-bit) irqs sharing one line,
992                  * but who knows.
993                  */
994                 if (thread_mask == ~0UL) {
995                         ret = -EBUSY;
996                         goto out_mask;
997                 }
998                 /*
999                  * The thread_mask for the action is or'ed to
1000                  * desc->threads_active to indicate that the
1001                  * IRQF_ONESHOT thread handler has been woken, but not
1002                  * yet finished. The bit is cleared when a thread
1003                  * completes. When all threads of a shared interrupt
1004                  * line have completed desc->threads_active becomes
1005                  * zero and the interrupt line is unmasked. See
1006                  * handle.c:irq_wake_thread() for further information.
1007                  *
1008                  * If no thread is woken by primary (hard irq context)
1009                  * interrupt handlers, then desc->threads_active is
1010                  * also checked for zero to unmask the irq line in the
1011                  * affected hard irq flow handlers
1012                  * (handle_[fasteoi|level]_irq).
1013                  *
1014                  * The new action gets the first zero bit of
1015                  * thread_mask assigned. See the loop above which or's
1016                  * all existing action->thread_mask bits.
1017                  */
1018                 new->thread_mask = 1 << ffz(thread_mask);
1019
1020         } else if (new->handler == irq_default_primary_handler) {
1021                 /*
1022                  * The interrupt was requested with handler = NULL, so
1023                  * we use the default primary handler for it. But it
1024                  * does not have the oneshot flag set. In combination
1025                  * with level interrupts this is deadly, because the
1026                  * default primary handler just wakes the thread, then
1027                  * the irq line is re-enabled, but the device still
1028                  * has the level irq asserted. Rinse and repeat....
1029                  *
1030                  * While this works for edge type interrupts, we play
1031                  * it safe and reject unconditionally because we can't
1032                  * say for sure which type this interrupt really
1033                  * has. The type flags are unreliable as the
1034                  * underlying chip implementation can override them.
1035                  */
1036                 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1037                        irq);
1038                 ret = -EINVAL;
1039                 goto out_mask;
1040         }
1041
1042         if (!shared) {
1043                 init_waitqueue_head(&desc->wait_for_threads);
1044
1045                 /* Setup the type (level, edge, polarity) if configured: */
1046                 if (new->flags & IRQF_TRIGGER_MASK) {
1047                         ret = __irq_set_trigger(desc, irq,
1048                                         new->flags & IRQF_TRIGGER_MASK);
1049
1050                         if (ret)
1051                                 goto out_mask;
1052                 }
1053
1054                 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1055                                   IRQS_ONESHOT | IRQS_WAITING);
1056                 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1057
1058                 if (new->flags & IRQF_PERCPU) {
1059                         irqd_set(&desc->irq_data, IRQD_PER_CPU);
1060                         irq_settings_set_per_cpu(desc);
1061                 }
1062
1063                 if (new->flags & IRQF_ONESHOT)
1064                         desc->istate |= IRQS_ONESHOT;
1065
1066                 if (irq_settings_can_autoenable(desc))
1067                         irq_startup(desc, true);
1068                 else
1069                         /* Undo nested disables: */
1070                         desc->depth = 1;
1071
1072                 /* Exclude IRQ from balancing if requested */
1073                 if (new->flags & IRQF_NOBALANCING) {
1074                         irq_settings_set_no_balancing(desc);
1075                         irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1076                 }
1077
1078                 /* Set default affinity mask once everything is setup */
1079                 setup_affinity(irq, desc, mask);
1080
1081         } else if (new->flags & IRQF_TRIGGER_MASK) {
1082                 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1083                 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1084
1085                 if (nmsk != omsk)
1086                         /* hope the handler works with the current trigger mode */
1087                         pr_warning("irq %d uses trigger mode %u; requested %u\n",
1088                                    irq, omsk, nmsk);
1089         }
1090
1091         new->irq = irq;
1092         *old_ptr = new;
1093
1094         /* Reset broken irq detection when installing new handler */
1095         desc->irq_count = 0;
1096         desc->irqs_unhandled = 0;
1097
1098         /*
1099          * Check whether we disabled the irq via the spurious handler
1100          * before. Reenable it and give it another chance.
1101          */
1102         if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1103                 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1104                 __enable_irq(desc, irq, false);
1105         }
1106
1107         raw_spin_unlock_irqrestore(&desc->lock, flags);
1108
1109         /*
1110          * Strictly no need to wake it up, but hung_task complains
1111          * when no hard interrupt wakes the thread up.
1112          */
1113         if (new->thread)
1114                 wake_up_process(new->thread);
1115
1116         register_irq_proc(irq, desc);
1117         new->dir = NULL;
1118         register_handler_proc(irq, new);
1119         free_cpumask_var(mask);
1120
1121         return 0;
1122
1123 mismatch:
1124         if (!(new->flags & IRQF_PROBE_SHARED)) {
1125                 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1126                        irq, new->flags, new->name, old->flags, old->name);
1127 #ifdef CONFIG_DEBUG_SHIRQ
1128                 dump_stack();
1129 #endif
1130         }
1131         ret = -EBUSY;
1132
1133 out_mask:
1134         raw_spin_unlock_irqrestore(&desc->lock, flags);
1135         free_cpumask_var(mask);
1136
1137 out_thread:
1138         if (new->thread) {
1139                 struct task_struct *t = new->thread;
1140
1141                 new->thread = NULL;
1142                 kthread_stop(t);
1143                 put_task_struct(t);
1144         }
1145 out_mput:
1146         module_put(desc->owner);
1147         return ret;
1148 }
1149
1150 /**
1151  *      setup_irq - setup an interrupt
1152  *      @irq: Interrupt line to setup
1153  *      @act: irqaction for the interrupt
1154  *
1155  * Used to statically setup interrupts in the early boot process.
1156  */
1157 int setup_irq(unsigned int irq, struct irqaction *act)
1158 {
1159         int retval;
1160         struct irq_desc *desc = irq_to_desc(irq);
1161
1162         if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1163                 return -EINVAL;
1164         chip_bus_lock(desc);
1165         retval = __setup_irq(irq, desc, act);
1166         chip_bus_sync_unlock(desc);
1167
1168         return retval;
1169 }
1170 EXPORT_SYMBOL_GPL(setup_irq);
1171
1172 /*
1173  * Internal function to unregister an irqaction - used to free
1174  * regular and special interrupts that are part of the architecture.
1175  */
1176 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1177 {
1178         struct irq_desc *desc = irq_to_desc(irq);
1179         struct irqaction *action, **action_ptr;
1180         unsigned long flags;
1181
1182         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1183
1184         if (!desc)
1185                 return NULL;
1186
1187         raw_spin_lock_irqsave(&desc->lock, flags);
1188
1189         /*
1190          * There can be multiple actions per IRQ descriptor, find the right
1191          * one based on the dev_id:
1192          */
1193         action_ptr = &desc->action;
1194         for (;;) {
1195                 action = *action_ptr;
1196
1197                 if (!action) {
1198                         WARN(1, "Trying to free already-free IRQ %d\n", irq);
1199                         raw_spin_unlock_irqrestore(&desc->lock, flags);
1200
1201                         return NULL;
1202                 }
1203
1204                 if (action->dev_id == dev_id)
1205                         break;
1206                 action_ptr = &action->next;
1207         }
1208
1209         /* Found it - now remove it from the list of entries: */
1210         *action_ptr = action->next;
1211
1212         /* If this was the last handler, shut down the IRQ line: */
1213         if (!desc->action)
1214                 irq_shutdown(desc);
1215
1216 #ifdef CONFIG_SMP
1217         /* make sure affinity_hint is cleaned up */
1218         if (WARN_ON_ONCE(desc->affinity_hint))
1219                 desc->affinity_hint = NULL;
1220 #endif
1221
1222         raw_spin_unlock_irqrestore(&desc->lock, flags);
1223
1224         unregister_handler_proc(irq, action);
1225
1226         /* Make sure it's not being used on another CPU: */
1227         synchronize_irq(irq);
1228
1229 #ifdef CONFIG_DEBUG_SHIRQ
1230         /*
1231          * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1232          * event to happen even now it's being freed, so let's make sure that
1233          * event to happen even now that it's being freed, so let's make
1234          * sure that is so by doing an extra call to the handler ....
1235          *
1236          * ( We do this after actually deregistering it, to make sure that
1237          *   a 'real' IRQ doesn't run in parallel with our fake. )
1238         if (action->flags & IRQF_SHARED) {
1239                 local_irq_save(flags);
1240                 action->handler(irq, dev_id);
1241                 local_irq_restore(flags);
1242         }
1243 #endif
1244
1245         if (action->thread) {
1246                 kthread_stop(action->thread);
1247                 put_task_struct(action->thread);
1248         }
1249
1250         module_put(desc->owner);
1251         return action;
1252 }
1253
1254 /**
1255  *      remove_irq - free an interrupt
1256  *      @irq: Interrupt line to free
1257  *      @act: irqaction for the interrupt
1258  *
1259  * Used to remove interrupts statically setup by the early boot process.
1260  */
1261 void remove_irq(unsigned int irq, struct irqaction *act)
1262 {
1263         struct irq_desc *desc = irq_to_desc(irq);
1264
1265         if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1266             __free_irq(irq, act->dev_id);
1267 }
1268 EXPORT_SYMBOL_GPL(remove_irq);
1269
1270 /**
1271  *      free_irq - free an interrupt allocated with request_irq
1272  *      @irq: Interrupt line to free
1273  *      @dev_id: Device identity to free
1274  *
1275  *      Remove an interrupt handler. The handler is removed and if the
1276  *      interrupt line is no longer in use by any driver it is disabled.
1277  *      On a shared IRQ the caller must ensure the interrupt is disabled
1278  *      on the card it drives before calling this function. The function
1279  *      does not return until any executing interrupts for this IRQ
1280  *      have completed.
1281  *
1282  *      This function must not be called from interrupt context.
1283  */
1284 void free_irq(unsigned int irq, void *dev_id)
1285 {
1286         struct irq_desc *desc = irq_to_desc(irq);
1287
1288         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1289                 return;
1290
1291 #ifdef CONFIG_SMP
1292         if (WARN_ON(desc->affinity_notify))
1293                 desc->affinity_notify = NULL;
1294 #endif
1295
1296         chip_bus_lock(desc);
1297         kfree(__free_irq(irq, dev_id));
1298         chip_bus_sync_unlock(desc);
1299 }
1300 EXPORT_SYMBOL(free_irq);
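/*
 * Illustrative sketch (hypothetical driver): teardown order for a
 * shared line, following the rules above - quiesce the card, drop the
 * affinity notifier and hint, then free the handler.
 */
#if 0	/* example only */
static void my_dev_remove(struct my_dev *dev)
{
	my_dev_mask_device_irq(dev);	/* stop the card first (shared IRQ) */
	irq_set_affinity_notifier(dev->irq, NULL);
	irq_set_affinity_hint(dev->irq, NULL);
	free_irq(dev->irq, dev);	/* waits for running handlers */
}
#endif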
1301
1302 /**
1303  *      request_threaded_irq - allocate an interrupt line
1304  *      @irq: Interrupt line to allocate
1305  *      @handler: Function to be called when the IRQ occurs.
1306  *                Primary handler for threaded interrupts
1307  *                If NULL and thread_fn != NULL the default
1308  *                primary handler is installed
1309  *      @thread_fn: Function called from the irq handler thread
1310  *                  If NULL, no irq thread is created
1311  *      @irqflags: Interrupt type flags
1312  *      @devname: An ascii name for the claiming device
1313  *      @dev_id: A cookie passed back to the handler function
1314  *
1315  *      This call allocates interrupt resources and enables the
1316  *      interrupt line and IRQ handling. From the point this
1317  *      call is made your handler function may be invoked. Since
1318  *      your handler function must clear any interrupt the board
1319  *      raises, you must take care both to initialise your hardware
1320  *      and to set up the interrupt handler in the right order.
1321  *
1322  *      If you want to set up a threaded irq handler for your device
1323  *      then you need to supply @handler and @thread_fn. @handler is
1324  *      still called in hard interrupt context and has to check
1325  *      whether the interrupt originates from the device. If yes it
1326  *      needs to disable the interrupt on the device and return
1327  *      IRQ_WAKE_THREAD which will wake up the handler thread and run
1328  *      @thread_fn. This split handler design is necessary to support
1329  *      shared interrupts.
1330  *
1331  *      Dev_id must be globally unique. Normally the address of the
1332  *      device data structure is used as the cookie. Since the handler
1333  *      receives this value it makes sense to use it.
1334  *
1335  *      If your interrupt is shared you must pass a non NULL dev_id
1336  *      as this is required when freeing the interrupt.
1337  *
1338  *      Flags:
1339  *
1340  *      IRQF_SHARED             Interrupt is shared
1341  *      IRQF_TRIGGER_*          Specify active edge(s) or level
1342  *
1343  */
1344 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1345                          irq_handler_t thread_fn, unsigned long irqflags,
1346                          const char *devname, void *dev_id)
1347 {
1348         struct irqaction *action;
1349         struct irq_desc *desc;
1350         int retval;
1351
1352         /*
1353          * Sanity-check: shared interrupts must pass in a real dev-ID,
1354          * otherwise we'll have trouble later trying to figure out
1355          * which interrupt is which (messes up the interrupt freeing
1356          * logic etc).
1357          */
1358         if ((irqflags & IRQF_SHARED) && !dev_id)
1359                 return -EINVAL;
1360
1361         desc = irq_to_desc(irq);
1362         if (!desc)
1363                 return -EINVAL;
1364
1365         if (!irq_settings_can_request(desc) ||
1366             WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1367                 return -EINVAL;
1368
1369         if (!handler) {
1370                 if (!thread_fn)
1371                         return -EINVAL;
1372                 handler = irq_default_primary_handler;
1373         }
1374
1375         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1376         if (!action)
1377                 return -ENOMEM;
1378
1379         action->handler = handler;
1380         action->thread_fn = thread_fn;
1381         action->flags = irqflags;
1382         action->name = devname;
1383         action->dev_id = dev_id;
1384
1385         chip_bus_lock(desc);
1386         retval = __setup_irq(irq, desc, action);
1387         chip_bus_sync_unlock(desc);
1388
1389         if (retval)
1390                 kfree(action);
1391
1392 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1393         if (!retval && (irqflags & IRQF_SHARED)) {
1394                 /*
1395                  * It's a shared IRQ -- the driver ought to be prepared for it
1396                  * to happen immediately, so let's make sure....
1397                  * We disable the irq to make sure that a 'real' IRQ doesn't
1398                  * run in parallel with our fake.
1399                  */
1400                 unsigned long flags;
1401
1402                 disable_irq(irq);
1403                 local_irq_save(flags);
1404
1405                 handler(irq, dev_id);
1406
1407                 local_irq_restore(flags);
1408                 enable_irq(irq);
1409         }
1410 #endif
1411         return retval;
1412 }
1413 EXPORT_SYMBOL(request_threaded_irq);
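/*
 * Illustrative sketch (hypothetical device helpers): the split
 * primary/thread design described above. The hard-irq part only checks
 * ownership and quiesces the device; the slow work runs in the thread,
 * where sleeping is allowed.
 */
#if 0	/* example only */
static irqreturn_t my_dev_hardirq(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	if (!my_dev_irq_pending(dev))
		return IRQ_NONE;		/* not ours (shared line) */

	my_dev_mask_device_irq(dev);		/* quiesce at the device */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_dev_thread(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	my_dev_process_events(dev);		/* may sleep here */
	my_dev_unmask_device_irq(dev);
	return IRQ_HANDLED;
}

/*
 * In probe:
 *	err = request_threaded_irq(dev->irq, my_dev_hardirq, my_dev_thread,
 *				   IRQF_SHARED, "my_dev", dev);
 */
#endif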
1414
1415 /**
1416  *      request_any_context_irq - allocate an interrupt line
1417  *      @irq: Interrupt line to allocate
1418  *      @handler: Function to be called when the IRQ occurs.
1419  *                Threaded handler for threaded interrupts.
1420  *      @flags: Interrupt type flags
1421  *      @name: An ascii name for the claiming device
1422  *      @dev_id: A cookie passed back to the handler function
1423  *
1424  *      This call allocates interrupt resources and enables the
1425  *      interrupt line and IRQ handling. It selects either a
1426  *      hardirq or threaded handling method depending on the
1427  *      context.
1428  *
1429  *      On failure, it returns a negative value. On success,
1430  *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1431  */
1432 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1433                             unsigned long flags, const char *name, void *dev_id)
1434 {
1435         struct irq_desc *desc = irq_to_desc(irq);
1436         int ret;
1437
1438         if (!desc)
1439                 return -EINVAL;
1440
1441         if (irq_settings_is_nested_thread(desc)) {
1442                 ret = request_threaded_irq(irq, NULL, handler,
1443                                            flags, name, dev_id);
1444                 return !ret ? IRQC_IS_NESTED : ret;
1445         }
1446
1447         ret = request_irq(irq, handler, flags, name, dev_id);
1448         return !ret ? IRQC_IS_HARDIRQ : ret;
1449 }
1450 EXPORT_SYMBOL_GPL(request_any_context_irq);
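/*
 * Illustrative sketch (hypothetical handler): since success returns
 * IRQC_IS_HARDIRQ or IRQC_IS_NESTED rather than plain 0, callers
 * should test for a negative value only.
 */
#if 0	/* example only */
static int my_dev_request(unsigned int irq, void *dev)
{
	int ret;

	ret = request_any_context_irq(irq, my_handler, 0, "my_dev", dev);
	if (ret < 0)
		return ret;

	/* ret now tells us which handling method was chosen. */
	return 0;
}
#endif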
1451
1452 void enable_percpu_irq(unsigned int irq, unsigned int type)
1453 {
1454         unsigned int cpu = smp_processor_id();
1455         unsigned long flags;
1456         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1457
1458         if (!desc)
1459                 return;
1460
1461         type &= IRQ_TYPE_SENSE_MASK;
1462         if (type != IRQ_TYPE_NONE) {
1463                 int ret;
1464
1465                 ret = __irq_set_trigger(desc, irq, type);
1466
1467                 if (ret) {
1468                         WARN(1, "failed to set type for IRQ%d\n", irq);
1469                         goto out;
1470                 }
1471         }
1472
1473         irq_percpu_enable(desc, cpu);
1474 out:
1475         irq_put_desc_unlock(desc, flags);
1476 }
1477
1478 void disable_percpu_irq(unsigned int irq)
1479 {
1480         unsigned int cpu = smp_processor_id();
1481         unsigned long flags;
1482         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1483
1484         if (!desc)
1485                 return;
1486
1487         irq_percpu_disable(desc, cpu);
1488         irq_put_desc_unlock(desc, flags);
1489 }
1490
1491 /*
1492  * Internal function to unregister a percpu irqaction.
1493  */
1494 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1495 {
1496         struct irq_desc *desc = irq_to_desc(irq);
1497         struct irqaction *action;
1498         unsigned long flags;
1499
1500         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1501
1502         if (!desc)
1503                 return NULL;
1504
1505         raw_spin_lock_irqsave(&desc->lock, flags);
1506
1507         action = desc->action;
1508         if (!action || action->percpu_dev_id != dev_id) {
1509                 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1510                 goto bad;
1511         }
1512
1513         if (!cpumask_empty(desc->percpu_enabled)) {
1514                 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1515                      irq, cpumask_first(desc->percpu_enabled));
1516                 goto bad;
1517         }
1518
1519         /* Found it - now remove it from the list of entries: */
1520         desc->action = NULL;
1521
1522         raw_spin_unlock_irqrestore(&desc->lock, flags);
1523
1524         unregister_handler_proc(irq, action);
1525
1526         module_put(desc->owner);
1527         return action;
1528
1529 bad:
1530         raw_spin_unlock_irqrestore(&desc->lock, flags);
1531         return NULL;
1532 }
1533
1534 /**
1535  *      remove_percpu_irq - free a per-cpu interrupt
1536  *      @irq: Interrupt line to free
1537  *      @act: irqaction for the interrupt
1538  *
1539  * Used to remove interrupts statically setup by the early boot process.
1540  */
1541 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1542 {
1543         struct irq_desc *desc = irq_to_desc(irq);
1544
1545         if (desc && irq_settings_is_per_cpu_devid(desc))
1546             __free_percpu_irq(irq, act->percpu_dev_id);
1547 }
1548
1549 /**
1550  *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
1551  *      @irq: Interrupt line to free
1552  *      @dev_id: Device identity to free
1553  *
1554  *      Remove a percpu interrupt handler. The handler is removed, but
1555  *      the interrupt line is not disabled. This must be done on each
1556  *      CPU before calling this function. The function does not return
1557  *      until any executing interrupts for this IRQ have completed.
1558  *
1559  *      This function must not be called from interrupt context.
1560  */
1561 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1562 {
1563         struct irq_desc *desc = irq_to_desc(irq);
1564
1565         if (!desc || !irq_settings_is_per_cpu_devid(desc))
1566                 return;
1567
1568         chip_bus_lock(desc);
1569         kfree(__free_percpu_irq(irq, dev_id));
1570         chip_bus_sync_unlock(desc);
1571 }
1572
1573 /**
1574  *      setup_percpu_irq - setup a per-cpu interrupt
1575  *      @irq: Interrupt line to setup
1576  *      @act: irqaction for the interrupt
1577  *
1578  * Used to statically setup per-cpu interrupts in the early boot process.
1579  */
1580 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1581 {
1582         struct irq_desc *desc = irq_to_desc(irq);
1583         int retval;
1584
1585         if (!desc || !irq_settings_is_per_cpu_devid(desc))
1586                 return -EINVAL;
1587         chip_bus_lock(desc);
1588         retval = __setup_irq(irq, desc, act);
1589         chip_bus_sync_unlock(desc);
1590
1591         return retval;
1592 }
1593
1594 /**
1595  *      request_percpu_irq - allocate a percpu interrupt line
1596  *      @irq: Interrupt line to allocate
1597  *      @handler: Function to be called when the IRQ occurs.
1598  *      @devname: An ascii name for the claiming device
1599  *      @dev_id: A percpu cookie passed back to the handler function
1600  *
1601  *      This call allocates interrupt resources, but doesn't
1602  *      automatically enable the interrupt. It has to be done on each
1603  *      CPU using enable_percpu_irq().
1604  *
1605  *      Dev_id must be globally unique. It is a per-cpu variable, and
1606  *      the handler gets called with the interrupted CPU's instance of
1607  *      that variable.
1608  */
1609 int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1610                        const char *devname, void __percpu *dev_id)
1611 {
1612         struct irqaction *action;
1613         struct irq_desc *desc;
1614         int retval;
1615
1616         if (!dev_id)
1617                 return -EINVAL;
1618
1619         desc = irq_to_desc(irq);
1620         if (!desc || !irq_settings_can_request(desc) ||
1621             !irq_settings_is_per_cpu_devid(desc))
1622                 return -EINVAL;
1623
1624         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1625         if (!action)
1626                 return -ENOMEM;
1627
1628         action->handler = handler;
1629         action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1630         action->name = devname;
1631         action->percpu_dev_id = dev_id;
1632
1633         chip_bus_lock(desc);
1634         retval = __setup_irq(irq, desc, action);
1635         chip_bus_sync_unlock(desc);
1636
1637         if (retval)
1638                 kfree(action);
1639
1640         return retval;
1641 }
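/*
 * Illustrative sketch (hypothetical per-cpu state and handler):
 * request_percpu_irq() only allocates the line; each CPU then enables
 * it locally, e.g. from its startup path, with enable_percpu_irq().
 */
#if 0	/* example only */
static DEFINE_PER_CPU(struct my_percpu_state, my_state);

static irqreturn_t my_timer_handler(int irq, void *dev_id)
{
	struct my_percpu_state *st = dev_id;	/* this CPU's instance */

	my_handle_tick(st);
	return IRQ_HANDLED;
}

static int my_timer_init(unsigned int irq)
{
	int err;

	err = request_percpu_irq(irq, my_timer_handler, "my_timer",
				 &my_state);
	if (err)
		return err;

	/* Must run on each CPU that should receive the interrupt: */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
#endif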