/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL           (5 * HZ)

#define RFKILL_BLOCK_HW         BIT(0)
#define RFKILL_BLOCK_SW         BIT(1)
#define RFKILL_BLOCK_SW_PREV    BIT(2)
#define RFKILL_BLOCK_ANY        (RFKILL_BLOCK_HW |\
                                 RFKILL_BLOCK_SW |\
                                 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL BIT(31)

struct rfkill {
        spinlock_t              lock;

        const char              *name;
        enum rfkill_type        type;

        unsigned long           state;

        u32                     idx;

        bool                    registered;
        bool                    persistent;

        const struct rfkill_ops *ops;
        void                    *data;

#ifdef CONFIG_RFKILL_LEDS
        struct led_trigger      led_trigger;
        const char              *ledtrigname;
#endif

        struct device           dev;
        struct list_head        node;

        struct delayed_work     poll_work;
        struct work_struct      uevent_work;
        struct work_struct      sync_work;
};
#define to_rfkill(d)    container_of(d, struct rfkill, dev)

struct rfkill_int_event {
        struct list_head        list;
        struct rfkill_event     ev;
};

struct rfkill_data {
        struct list_head        list;
        struct list_head        events;
        struct mutex            mtx;
        wait_queue_head_t       read_wait;
        bool                    input_handler;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
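
/*
 * To illustrate the AB-BA problem above (a sketch; "mydrv" and its
 * lock are hypothetical, not taken from any real driver):
 *
 *      register path:  mutex_lock(&mydrv->lock);
 *                      rfkill_register(...);   -- takes rfkill_global_mutex
 *
 *      core path:      mutex_lock(&rfkill_global_mutex);
 *                      ops->set_block(...);    -- driver takes mydrv->lock
 *
 * The two paths acquire the same two locks in opposite order.
 */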
static LIST_HEAD(rfkill_list);  /* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);   /* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
                 "Default initial state for all radio types, 0 = radio off");
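
/*
 * For example (assuming rfkill is built in, as suggested by the
 * subsys_initcall below), booting with "rfkill.default_state=0" starts
 * every radio type soft-blocked; the default of 1 leaves them unblocked.
 */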

static struct {
        bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
        struct led_trigger *trigger;

        if (!rfkill->registered)
                return;

        trigger = &rfkill->led_trigger;

        if (rfkill->state & RFKILL_BLOCK_ANY)
                led_trigger_event(trigger, LED_OFF);
        else
                led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
        struct rfkill *rfkill;

        rfkill = container_of(led->trigger, struct rfkill, led_trigger);

        rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
        return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
        BUG_ON(!rfkill);

        rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
        rfkill->led_trigger.name = rfkill->ledtrigname
                                        ? : dev_name(&rfkill->dev);
        rfkill->led_trigger.activate = rfkill_led_trigger_activate;
        return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
        led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
        return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
                              enum rfkill_operation op)
{
        unsigned long flags;

        ev->idx = rfkill->idx;
        ev->type = rfkill->type;
        ev->op = op;

        spin_lock_irqsave(&rfkill->lock, flags);
        ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
        ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
                                        RFKILL_BLOCK_SW_PREV));
        spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
        struct rfkill_data *data;
        struct rfkill_int_event *ev;

        list_for_each_entry(data, &rfkill_fds, list) {
                ev = kzalloc(sizeof(*ev), GFP_KERNEL);
                if (!ev)
                        continue;
                rfkill_fill_event(&ev->ev, rfkill, op);
                mutex_lock(&data->mtx);
                list_add_tail(&ev->list, &data->events);
                mutex_unlock(&data->mtx);
                wake_up_interruptible(&data->read_wait);
        }
}

static void rfkill_event(struct rfkill *rfkill)
{
        if (!rfkill->registered)
                return;

        kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

        /* also send event to /dev/rfkill */
        rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

static bool __rfkill_set_hw_state(struct rfkill *rfkill,
                                  bool blocked, bool *change)
{
        unsigned long flags;
        bool prev, any;

        BUG_ON(!rfkill);

        spin_lock_irqsave(&rfkill->lock, flags);
        prev = !!(rfkill->state & RFKILL_BLOCK_HW);
        if (blocked)
                rfkill->state |= RFKILL_BLOCK_HW;
        else
                rfkill->state &= ~RFKILL_BLOCK_HW;
        *change = prev != blocked;
        any = !!(rfkill->state & RFKILL_BLOCK_ANY);
        spin_unlock_irqrestore(&rfkill->lock, flags);

        rfkill_led_trigger_event(rfkill);

        return any;
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;
        int err;

        if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
                return;

        /*
         * Some platforms (...!) generate input events which affect the
         * _hard_ kill state -- whenever something tries to change the
         * current software state, query the hardware state too.
         */
        if (rfkill->ops->query)
                rfkill->ops->query(rfkill, rfkill->data);

        spin_lock_irqsave(&rfkill->lock, flags);
        if (rfkill->state & RFKILL_BLOCK_SW)
                rfkill->state |= RFKILL_BLOCK_SW_PREV;
        else
                rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

        if (blocked)
                rfkill->state |= RFKILL_BLOCK_SW;
        else
                rfkill->state &= ~RFKILL_BLOCK_SW;

        rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        err = rfkill->ops->set_block(rfkill->data, blocked);

        spin_lock_irqsave(&rfkill->lock, flags);
        if (err) {
                /*
                 * Failed -- reset status to _PREV, which may be different
                 * from what we set _PREV to earlier in this function
                 * if rfkill_set_sw_state was invoked.
                 */
                if (rfkill->state & RFKILL_BLOCK_SW_PREV)
                        rfkill->state |= RFKILL_BLOCK_SW;
                else
                        rfkill->state &= ~RFKILL_BLOCK_SW;
        }
        rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
        rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        rfkill_led_trigger_event(rfkill);
        rfkill_event(rfkill);
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is claimed by userspace (in which case,
 * that switch is left alone) or suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
        struct rfkill *rfkill;

        rfkill_global_states[type].cur = blocked;
        list_for_each_entry(rfkill, &rfkill_list, node) {
                if (rfkill->type != type && type != RFKILL_TYPE_ALL)
                        continue;

                rfkill_set_block(rfkill, blocked);
        }
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);

        if (!rfkill_epo_lock_active)
                __rfkill_switch_all(type, blocked);

        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices into the soft-blocked state,
 * ignoring everything in its path but rfkill_global_mutex and rfkill->lock.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
        struct rfkill *rfkill;
        int i;

        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);

        rfkill_epo_lock_active = true;
        list_for_each_entry(rfkill, &rfkill_list, node)
                rfkill_set_block(rfkill, true);

        for (i = 0; i < NUM_RFKILL_TYPES; i++) {
                rfkill_global_states[i].sav = rfkill_global_states[i].cur;
                rfkill_global_states[i].cur = true;
        }

        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global states saved in
 * rfkill_global_states.  This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
        int i;

        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);

        rfkill_epo_lock_active = false;
        for (i = 0; i < NUM_RFKILL_TYPES; i++)
                __rfkill_switch_all(i, rfkill_global_states[i].sav);
        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);
        rfkill_epo_lock_active = false;
        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true if EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
        return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
        return rfkill_global_states[type].cur;
}
#endif


bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
        bool ret, change;

        ret = __rfkill_set_hw_state(rfkill, blocked, &change);

        if (!rfkill->registered)
                return ret;

        if (change)
                schedule_work(&rfkill->uevent_work);

        return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
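
/*
 * Typical use -- a sketch, with hypothetical "mydrv" names; only the
 * rfkill_set_hw_state() call itself is this file's API.  A driver reads
 * its hardware kill pin (here from an interrupt handler) and reports it:
 *
 *      static irqreturn_t mydrv_radio_irq(int irq, void *arg)
 *      {
 *              struct mydrv *drv = arg;
 *
 *              rfkill_set_hw_state(drv->rfkill, mydrv_read_kill_pin(drv));
 *              return IRQ_HANDLED;
 *      }
 *
 * The return value is the combined block state, so the caller can also
 * use it to decide whether to power the radio down.
 */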

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
        u32 bit = RFKILL_BLOCK_SW;

        /* if inside an ops->set_block call right now, use the other bit */
        if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
                bit = RFKILL_BLOCK_SW_PREV;

        if (blocked)
                rfkill->state |= bit;
        else
                rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;
        bool prev, hwblock;

        BUG_ON(!rfkill);

        spin_lock_irqsave(&rfkill->lock, flags);
        prev = !!(rfkill->state & RFKILL_BLOCK_SW);
        __rfkill_set_sw_state(rfkill, blocked);
        hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
        blocked = blocked || hwblock;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        if (!rfkill->registered)
                return blocked;

        if (prev != blocked && !hwblock)
                schedule_work(&rfkill->uevent_work);

        rfkill_led_trigger_event(rfkill);

        return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
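
/*
 * Sketch (hypothetical driver code, not from the tree): report a soft
 * block change noticed by firmware.  As above, the return value is the
 * combined block state:
 *
 *      blocked = rfkill_set_sw_state(drv->rfkill, fw_soft_blocked);
 *      if (blocked)
 *              mydrv_radio_off(drv);
 */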

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;

        BUG_ON(!rfkill);
        BUG_ON(rfkill->registered);

        spin_lock_irqsave(&rfkill->lock, flags);
        __rfkill_set_sw_state(rfkill, blocked);
        rfkill->persistent = true;
        spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
        unsigned long flags;
        bool swprev, hwprev;

        BUG_ON(!rfkill);

        spin_lock_irqsave(&rfkill->lock, flags);

        /*
         * No need to care about prev/setblock ... this is for uevent only
         * and that will get triggered by rfkill_set_block anyway.
         */
        swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
        hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
        __rfkill_set_sw_state(rfkill, sw);
        if (hw)
                rfkill->state |= RFKILL_BLOCK_HW;
        else
                rfkill->state &= ~RFKILL_BLOCK_HW;

        spin_unlock_irqrestore(&rfkill->lock, flags);

        if (!rfkill->registered) {
                rfkill->persistent = true;
        } else {
                if (swprev != sw || hwprev != hw)
                        schedule_work(&rfkill->uevent_work);

                rfkill_led_trigger_event(rfkill);
        }
}
EXPORT_SYMBOL(rfkill_set_states);

static ssize_t rfkill_name_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%s\n", rfkill->name);
}

static const char *rfkill_get_type_str(enum rfkill_type type)
{
        BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);

        switch (type) {
        case RFKILL_TYPE_WLAN:
                return "wlan";
        case RFKILL_TYPE_BLUETOOTH:
                return "bluetooth";
        case RFKILL_TYPE_UWB:
                return "ultrawideband";
        case RFKILL_TYPE_WIMAX:
                return "wimax";
        case RFKILL_TYPE_WWAN:
                return "wwan";
        case RFKILL_TYPE_GPS:
                return "gps";
        case RFKILL_TYPE_FM:
                return "fm";
        default:
                BUG();
        }
}

static ssize_t rfkill_type_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}

static ssize_t rfkill_idx_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", rfkill->idx);
}

static ssize_t rfkill_persistent_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", rfkill->persistent);
}

static ssize_t rfkill_hard_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}

static ssize_t rfkill_soft_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t rfkill_soft_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct rfkill *rfkill = to_rfkill(dev);
        unsigned long state;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        err = kstrtoul(buf, 0, &state);
        if (err)
                return err;

        if (state > 1)
                return -EINVAL;

        mutex_lock(&rfkill_global_mutex);
        rfkill_set_block(rfkill, state);
        mutex_unlock(&rfkill_global_mutex);

        return err ?: count;
}

static u8 user_state_from_blocked(unsigned long state)
{
        if (state & RFKILL_BLOCK_HW)
                return RFKILL_USER_STATE_HARD_BLOCKED;
        if (state & RFKILL_BLOCK_SW)
                return RFKILL_USER_STATE_SOFT_BLOCKED;

        return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t rfkill_state_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t rfkill_state_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct rfkill *rfkill = to_rfkill(dev);
        unsigned long state;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        err = kstrtoul(buf, 0, &state);
        if (err)
                return err;

        if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
            state != RFKILL_USER_STATE_UNBLOCKED)
                return -EINVAL;

        mutex_lock(&rfkill_global_mutex);
        rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
        mutex_unlock(&rfkill_global_mutex);

        return err ?: count;
}

static ssize_t rfkill_claim_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        return sprintf(buf, "%d\n", 0);
}

static ssize_t rfkill_claim_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        return -EOPNOTSUPP;
}

static struct device_attribute rfkill_dev_attrs[] = {
        __ATTR(name, S_IRUGO, rfkill_name_show, NULL),
        __ATTR(type, S_IRUGO, rfkill_type_show, NULL),
        __ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
        __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
        __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
        __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
        __ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
        __ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
        __ATTR_NULL
};

static void rfkill_release(struct device *dev)
{
        struct rfkill *rfkill = to_rfkill(dev);

        kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct rfkill *rfkill = to_rfkill(dev);
        unsigned long flags;
        u32 state;
        int error;

        error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
        if (error)
                return error;
        error = add_uevent_var(env, "RFKILL_TYPE=%s",
                               rfkill_get_type_str(rfkill->type));
        if (error)
                return error;
        spin_lock_irqsave(&rfkill->lock, flags);
        state = rfkill->state;
        spin_unlock_irqrestore(&rfkill->lock, flags);
        error = add_uevent_var(env, "RFKILL_STATE=%d",
                               user_state_from_blocked(state));
        return error;
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
        BUG_ON(!rfkill);

        if (!rfkill->ops->poll)
                return;

        cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
        BUG_ON(!rfkill);

        if (!rfkill->ops->poll)
                return;

        schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);

static int rfkill_suspend(struct device *dev, pm_message_t state)
{
        struct rfkill *rfkill = to_rfkill(dev);

        rfkill_pause_polling(rfkill);

        return 0;
}

static int rfkill_resume(struct device *dev)
{
        struct rfkill *rfkill = to_rfkill(dev);
        bool cur;

        if (!rfkill->persistent) {
                cur = !!(rfkill->state & RFKILL_BLOCK_SW);
                rfkill_set_block(rfkill, cur);
        }

        rfkill_resume_polling(rfkill);

        return 0;
}

static struct class rfkill_class = {
        .name           = "rfkill",
        .dev_release    = rfkill_release,
        .dev_attrs      = rfkill_dev_attrs,
        .dev_uevent     = rfkill_dev_uevent,
        .suspend        = rfkill_suspend,
        .resume         = rfkill_resume,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
        unsigned long flags;
        u32 state;

        spin_lock_irqsave(&rfkill->lock, flags);
        state = rfkill->state;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);


struct rfkill * __must_check rfkill_alloc(const char *name,
                                          struct device *parent,
                                          const enum rfkill_type type,
                                          const struct rfkill_ops *ops,
                                          void *ops_data)
{
        struct rfkill *rfkill;
        struct device *dev;

        if (WARN_ON(!ops))
                return NULL;

        if (WARN_ON(!ops->set_block))
                return NULL;

        if (WARN_ON(!name))
                return NULL;

        if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
                return NULL;

        rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
        if (!rfkill)
                return NULL;

        spin_lock_init(&rfkill->lock);
        INIT_LIST_HEAD(&rfkill->node);
        rfkill->type = type;
        rfkill->name = name;
        rfkill->ops = ops;
        rfkill->data = ops_data;

        dev = &rfkill->dev;
        dev->class = &rfkill_class;
        dev->parent = parent;
        device_initialize(dev);

        return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
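
/*
 * Typical driver lifecycle -- a sketch; the "mydrv" names are
 * hypothetical, everything else is this file's API:
 *
 *      static const struct rfkill_ops mydrv_rfkill_ops = {
 *              .set_block = mydrv_set_block,
 *      };
 *
 *      drv->rfkill = rfkill_alloc("mydrv-wlan", dev, RFKILL_TYPE_WLAN,
 *                                 &mydrv_rfkill_ops, drv);
 *      if (!drv->rfkill)
 *              return -ENOMEM;
 *
 *      err = rfkill_register(drv->rfkill);
 *      if (err) {
 *              rfkill_destroy(drv->rfkill);
 *              return err;
 *      }
 *
 * On teardown, call rfkill_unregister() and then rfkill_destroy().
 */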

static void rfkill_poll(struct work_struct *work)
{
        struct rfkill *rfkill;

        rfkill = container_of(work, struct rfkill, poll_work.work);

        /*
         * Poll hardware state -- driver will use one of the
         * rfkill_set{,_hw,_sw}_state functions and use its
         * return value to update the current status.
         */
        rfkill->ops->poll(rfkill, rfkill->data);

        schedule_delayed_work(&rfkill->poll_work,
                round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
        struct rfkill *rfkill;

        rfkill = container_of(work, struct rfkill, uevent_work);

        mutex_lock(&rfkill_global_mutex);
        rfkill_event(rfkill);
        mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
        struct rfkill *rfkill;
        bool cur;

        rfkill = container_of(work, struct rfkill, sync_work);

        mutex_lock(&rfkill_global_mutex);
        cur = rfkill_global_states[rfkill->type].cur;
        rfkill_set_block(rfkill, cur);
        mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
        static unsigned long rfkill_no;
        struct device *dev = &rfkill->dev;
        int error;

        BUG_ON(!rfkill);

        mutex_lock(&rfkill_global_mutex);

        if (rfkill->registered) {
                error = -EALREADY;
                goto unlock;
        }

        rfkill->idx = rfkill_no;
        dev_set_name(dev, "rfkill%lu", rfkill_no);
        rfkill_no++;

        list_add_tail(&rfkill->node, &rfkill_list);

        error = device_add(dev);
        if (error)
                goto remove;

        error = rfkill_led_trigger_register(rfkill);
        if (error)
                goto devdel;

        rfkill->registered = true;

        INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
        INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
        INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

        if (rfkill->ops->poll)
                schedule_delayed_work(&rfkill->poll_work,
                        round_jiffies_relative(POLL_INTERVAL));

        if (!rfkill->persistent || rfkill_epo_lock_active) {
                schedule_work(&rfkill->sync_work);
        } else {
#ifdef CONFIG_RFKILL_INPUT
                bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

                if (!atomic_read(&rfkill_input_disabled))
                        __rfkill_switch_all(rfkill->type, soft_blocked);
#endif
        }

        rfkill_send_events(rfkill, RFKILL_OP_ADD);

        mutex_unlock(&rfkill_global_mutex);
        return 0;

 devdel:
        device_del(&rfkill->dev);
 remove:
        list_del_init(&rfkill->node);
 unlock:
        mutex_unlock(&rfkill_global_mutex);
        return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
        BUG_ON(!rfkill);

        if (rfkill->ops->poll)
                cancel_delayed_work_sync(&rfkill->poll_work);

        cancel_work_sync(&rfkill->uevent_work);
        cancel_work_sync(&rfkill->sync_work);

        rfkill->registered = false;

        device_del(&rfkill->dev);

        mutex_lock(&rfkill_global_mutex);
        rfkill_send_events(rfkill, RFKILL_OP_DEL);
        list_del_init(&rfkill->node);
        mutex_unlock(&rfkill_global_mutex);

        rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
        if (rfkill)
                put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
        struct rfkill_data *data;
        struct rfkill *rfkill;
        struct rfkill_int_event *ev, *tmp;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        INIT_LIST_HEAD(&data->events);
        mutex_init(&data->mtx);
        init_waitqueue_head(&data->read_wait);

        mutex_lock(&rfkill_global_mutex);
        mutex_lock(&data->mtx);
        /*
         * start getting events from elsewhere but hold mtx to get
         * startup events added first
         */

        list_for_each_entry(rfkill, &rfkill_list, node) {
                ev = kzalloc(sizeof(*ev), GFP_KERNEL);
                if (!ev)
                        goto free;
                rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
                list_add_tail(&ev->list, &data->events);
        }
        list_add(&data->list, &rfkill_fds);
        mutex_unlock(&data->mtx);
        mutex_unlock(&rfkill_global_mutex);

        file->private_data = data;

        return nonseekable_open(inode, file);

 free:
        mutex_unlock(&data->mtx);
        mutex_unlock(&rfkill_global_mutex);
        mutex_destroy(&data->mtx);
        list_for_each_entry_safe(ev, tmp, &data->events, list)
                kfree(ev);
        kfree(data);
        return -ENOMEM;
}

static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
        struct rfkill_data *data = file->private_data;
        unsigned int res = POLLOUT | POLLWRNORM;

        poll_wait(file, &data->read_wait, wait);

        mutex_lock(&data->mtx);
        if (!list_empty(&data->events))
                res = POLLIN | POLLRDNORM;
        mutex_unlock(&data->mtx);

        return res;
}

static bool rfkill_readable(struct rfkill_data *data)
{
        bool r;

        mutex_lock(&data->mtx);
        r = !list_empty(&data->events);
        mutex_unlock(&data->mtx);

        return r;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
                               size_t count, loff_t *pos)
{
        struct rfkill_data *data = file->private_data;
        struct rfkill_int_event *ev;
        unsigned long sz;
        int ret;

        mutex_lock(&data->mtx);

        while (list_empty(&data->events)) {
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        goto out;
                }
                mutex_unlock(&data->mtx);
                ret = wait_event_interruptible(data->read_wait,
                                               rfkill_readable(data));
                mutex_lock(&data->mtx);

                if (ret)
                        goto out;
        }

        ev = list_first_entry(&data->events, struct rfkill_int_event,
                                list);

        sz = min_t(unsigned long, sizeof(ev->ev), count);
        ret = sz;
        if (copy_to_user(buf, &ev->ev, sz))
                ret = -EFAULT;

        list_del(&ev->list);
        kfree(ev);
 out:
        mutex_unlock(&data->mtx);
        return ret;
}

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *pos)
{
        struct rfkill *rfkill;
        struct rfkill_event ev;

        /* we don't need the 'hard' variable but accept it */
        if (count < RFKILL_EVENT_SIZE_V1 - 1)
                return -EINVAL;

        /*
         * Copy as much data as we can accept into our 'ev' buffer,
         * but tell userspace how much we've copied so it can determine
         * our API version even in a write() call, if it cares.
         */
        count = min(count, sizeof(ev));
        if (copy_from_user(&ev, buf, count))
                return -EFAULT;

        if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
                return -EINVAL;

        if (ev.type >= NUM_RFKILL_TYPES)
                return -EINVAL;

        mutex_lock(&rfkill_global_mutex);

        if (ev.op == RFKILL_OP_CHANGE_ALL) {
                if (ev.type == RFKILL_TYPE_ALL) {
                        enum rfkill_type i;

                        for (i = 0; i < NUM_RFKILL_TYPES; i++)
                                rfkill_global_states[i].cur = ev.soft;
                } else {
                        rfkill_global_states[ev.type].cur = ev.soft;
                }
        }

        list_for_each_entry(rfkill, &rfkill_list, node) {
                if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
                        continue;

                if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
                        continue;

                rfkill_set_block(rfkill, ev.soft);
        }
        mutex_unlock(&rfkill_global_mutex);

        return count;
}
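
/*
 * Userspace sketch (a hypothetical program, using only the ABI handled
 * above): soft-block all WLAN radios with one write to /dev/rfkill.
 *
 *      struct rfkill_event ev = {
 *              .op   = RFKILL_OP_CHANGE_ALL,
 *              .type = RFKILL_TYPE_WLAN,
 *              .soft = 1,
 *      };
 *      int fd = open("/dev/rfkill", O_RDWR);
 *
 *      if (fd < 0 || write(fd, &ev, sizeof(ev)) < 0)
 *              perror("rfkill");
 */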

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
        struct rfkill_data *data = file->private_data;
        struct rfkill_int_event *ev, *tmp;

        mutex_lock(&rfkill_global_mutex);
        list_del(&data->list);
        mutex_unlock(&rfkill_global_mutex);

        mutex_destroy(&data->mtx);
        list_for_each_entry_safe(ev, tmp, &data->events, list)
                kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
        if (data->input_handler)
                if (atomic_dec_return(&rfkill_input_disabled) == 0)
                        printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

        kfree(data);

        return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct rfkill_data *data = file->private_data;

        if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
                return -ENOSYS;

        if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
                return -ENOSYS;

        mutex_lock(&data->mtx);

        if (!data->input_handler) {
                if (atomic_inc_return(&rfkill_input_disabled) == 1)
                        printk(KERN_DEBUG "rfkill: input handler disabled\n");
                data->input_handler = true;
        }

        mutex_unlock(&data->mtx);

        return 0;
}
#endif

static const struct file_operations rfkill_fops = {
        .owner          = THIS_MODULE,
        .open           = rfkill_fop_open,
        .read           = rfkill_fop_read,
        .write          = rfkill_fop_write,
        .poll           = rfkill_fop_poll,
        .release        = rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
        .unlocked_ioctl = rfkill_fop_ioctl,
        .compat_ioctl   = rfkill_fop_ioctl,
#endif
        .llseek         = no_llseek,
};

static struct miscdevice rfkill_miscdev = {
        .name   = "rfkill",
        .fops   = &rfkill_fops,
        .minor  = MISC_DYNAMIC_MINOR,
};

static int __init rfkill_init(void)
{
        int error;
        int i;

        for (i = 0; i < NUM_RFKILL_TYPES; i++)
                rfkill_global_states[i].cur = !rfkill_default_state;

        error = class_register(&rfkill_class);
        if (error)
                goto out;

        error = misc_register(&rfkill_miscdev);
        if (error) {
                class_unregister(&rfkill_class);
                goto out;
        }

#ifdef CONFIG_RFKILL_INPUT
        error = rfkill_handler_init();
        if (error) {
                misc_deregister(&rfkill_miscdev);
                class_unregister(&rfkill_class);
                goto out;
        }
#endif

 out:
        return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
        rfkill_handler_exit();
#endif
        misc_deregister(&rfkill_miscdev);
        class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);