/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/slab.h>

#include "iio.h"
#include "ring_generic.h"

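/**
 * iio_push_ring_event() - ring buffer specific push to the event chrdev
 * @ring_buf:	ring buffer that is the source of the event
 * @event_code:	event identification code
 * @timestamp:	time of the event
 **/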
int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
			int event_code,
			s64 timestamp)
{
	return __iio_push_event(&ring_buf->ev_int,
				event_code,
				timestamp,
				&ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);

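/**
 * iio_push_or_escallate_ring_event() - escalate or add as appropriate
 * @ring_buf:	ring buffer that is the source of the event
 * @event_code:	event identification code
 * @timestamp:	time of the event
 *
 * If the shared event pointer already refers to a queued event, that
 * event is updated in place with the new code and timestamp rather
 * than a second event being queued.
 **/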
int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
				     int event_code,
				     s64 timestamp)
{
	if (ring_buf->shared_ev_pointer.ev_p)
		__iio_change_event(ring_buf->shared_ev_pointer.ev_p,
				   event_code,
				   timestamp);
	else
		return iio_push_ring_event(ring_buf,
					   event_code,
					   timestamp);
	return 0;
}
EXPORT_SYMBOL(iio_push_or_escallate_ring_event);

/**
 * iio_ring_open() - chrdev file open for ring buffer access
 * @inode:	inode of the chrdev being opened
 * @filp:	file structure for the opened chrdev
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
	struct iio_handler *hand
		= container_of(inode->i_cdev, struct iio_handler, chrdev);
	struct iio_ring_buffer *rb = hand->private;

	filp->private_data = hand->private;
	if (rb->access.mark_in_use)
		rb->access.mark_in_use(rb);

	return 0;
}

/**
 * iio_ring_release() - chrdev file close ring buffer access
 * @inode:	inode of the chrdev being released
 * @filp:	file structure for the chrdev being closed
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
	struct cdev *cd = inode->i_cdev;
	struct iio_handler *hand = iio_cdev_to_handler(cd);
	struct iio_ring_buffer *rb = hand->private;

	clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
	if (rb->access.unmark_in_use)
		rb->access.unmark_in_use(rb);

	return 0;
}

/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 * @filp:	file structure for the chrdev
 * @buf:	userspace buffer the data is copied into
 * @count:	number of bytes requested
 * @f_ps:	file position (unused)
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_rip_outer(struct file *filp, char __user *buf,
				  size_t count, loff_t *f_ps)
{
	struct iio_ring_buffer *rb = filp->private_data;
	int ret, dead_offset, copied;
	u8 *data;
	/* rip lots must exist. */
	if (!rb->access.rip_lots)
		return -EINVAL;
	copied = rb->access.rip_lots(rb, count, &data, &dead_offset);

	if (copied < 0) {
		ret = copied;
		goto error_ret;
	}
	if (copy_to_user(buf, data + dead_offset, copied)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}
	/* In clever ring buffer designs this may not need to be freed.
	 * When such a design exists I'll add this to ring access funcs.
	 */
	kfree(data);

	return copied;

error_free_data_cpy:
	kfree(data);
error_ret:
	return ret;
}

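/* chrdev file operations providing userspace read access to the ring */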
static const struct file_operations iio_ring_fileops = {
	.read = iio_ring_rip_outer,
	.release = iio_ring_release,
	.open = iio_ring_open,
	.owner = THIS_MODULE,
};

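/*
 * Illustrative userspace access (a sketch, not code from this file):
 * data is read from the chrdev registered below for the device named
 * "<ring dev name>:access0".  The /dev node path is an assumption, as
 * node creation is left to the system's udev configuration.
 *
 *	int fd = open("/dev/<ring dev name>:access0", O_RDONLY);
 *	ssize_t n = read(fd, buffer, sizeof(buffer));
 *	close(fd);
 */
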
/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf:	ring buffer whose event chrdev we are allocating
 * @id:		id of this ring buffer (typically 0)
 * @owner:	the module who owns the ring buffer (for ref counting)
 * @dev:	device with which the chrdev is associated
 **/
static inline int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
				       int id,
				       struct module *owner,
				       struct device *dev)
{
	int ret;

	snprintf(buf->ev_int._name, sizeof(buf->ev_int._name),
		 "%s:event%d",
		 dev_name(&buf->dev),
		 id);
	ret = iio_setup_ev_int(&(buf->ev_int),
			       buf->ev_int._name,
			       owner,
			       dev);
	if (ret)
		goto error_ret;
	return 0;

error_ret:
	return ret;
}

static inline void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
	iio_free_ev_int(&(buf->ev_int));
}

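/* Release for the access device: remove the chrdev and free its minor. */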
static void iio_ring_access_release(struct device *dev)
{
	struct iio_ring_buffer *buf
		= access_dev_to_iio_ring_buffer(dev);
	cdev_del(&buf->access_handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
	.release = iio_ring_access_release,
};

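/**
 * __iio_request_ring_buffer_access_chrdev() - allocate ring access chrdev
 * @buf:	ring buffer whose access chrdev we are allocating
 * @id:		id of this ring buffer (typically 0)
 * @owner:	the module who owns the ring buffer (for ref counting)
 **/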
static inline int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
					int id,
					struct module *owner)
{
	int ret, minor;

	buf->access_handler.flags = 0;

	buf->access_dev.parent = &buf->dev;
	buf->access_dev.bus = &iio_bus_type;
	buf->access_dev.type = &iio_ring_access_type;
	device_initialize(&buf->access_dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);

	buf->access_id = id;

	dev_set_name(&buf->access_dev, "%s:access%d",
		     dev_name(&buf->dev),
		     buf->access_id);
	ret = device_add(&buf->access_dev);
	if (ret < 0) {
		printk(KERN_ERR "failed to add the ring access dev\n");
		goto error_device_put;
	}

	cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
	buf->access_handler.chrdev.owner = owner;

	ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
	if (ret) {
		printk(KERN_ERR "failed to add the ring access chrdev\n");
		goto error_device_unregister;
	}
	return 0;

error_device_unregister:
	/* device_unregister() drops the reference taken at initialize
	 * time, so do not fall through to an extra put_device(). */
	device_unregister(&buf->access_dev);
	return ret;

error_device_put:
	put_device(&buf->access_dev);
	return ret;
}

static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
	device_unregister(&buf->access_dev);
}

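/**
 * iio_ring_buffer_init() - general initialization of a ring buffer
 * @ring:	ring buffer to be initialized
 * @dev_info:	iio device the ring buffer is associated with
 **/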
void iio_ring_buffer_init(struct iio_ring_buffer *ring,
			  struct iio_dev *dev_info)
{
	if (ring->access.mark_param_change)
		ring->access.mark_param_change(ring);
	ring->indio_dev = dev_info;
	ring->ev_int.private = ring;
	ring->access_handler.private = ring;
	ring->shared_ev_pointer.ev_p = NULL;
	spin_lock_init(&ring->shared_ev_pointer.lock);
}
EXPORT_SYMBOL(iio_ring_buffer_init);

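/**
 * iio_ring_buffer_register() - register the ring with the IIO core
 * @ring:	ring buffer to be registered
 * @id:		id of the ring buffer (typically 0)
 *
 * Adds the ring device itself, its event and access chrdevs and, where
 * supplied, the scan element sysfs group.
 **/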
int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
{
	int ret;

	ring->id = id;

	dev_set_name(&ring->dev, "%s:buffer%d",
		     dev_name(ring->dev.parent),
		     ring->id);
	ret = device_add(&ring->dev);
	if (ret)
		goto error_ret;

	ret = __iio_request_ring_buffer_event_chrdev(ring,
						     0,
						     ring->owner,
						     &ring->dev);
	if (ret)
		goto error_remove_device;

	ret = __iio_request_ring_buffer_access_chrdev(ring,
						      0,
						      ring->owner);
	if (ret)
		goto error_free_ring_buffer_event_chrdev;

	if (ring->scan_el_attrs) {
		ret = sysfs_create_group(&ring->dev.kobj,
					 ring->scan_el_attrs);
		if (ret) {
			dev_err(&ring->dev,
				"Failed to add sysfs scan elements\n");
			goto error_free_ring_buffer_access_chrdev;
		}
	}

	return 0;
error_free_ring_buffer_access_chrdev:
	__iio_free_ring_buffer_access_chrdev(ring);
error_free_ring_buffer_event_chrdev:
	__iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
	device_del(&ring->dev);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);
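
/*
 * Illustrative driver-side usage (a sketch, not code from this file):
 * once the ring buffer and iio_dev have been allocated, a driver would
 * typically do something like the following, where "error_free_ring"
 * is a hypothetical unwind label in the driver:
 *
 *	iio_ring_buffer_init(ring, indio_dev);
 *	ret = iio_ring_buffer_register(ring, 0);
 *	if (ret)
 *		goto error_free_ring;
 *
 * with iio_ring_buffer_unregister() called on the teardown path.
 */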
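
/**
 * iio_ring_buffer_unregister() - reverse the effect of
 *				  iio_ring_buffer_register()
 * @ring:	ring buffer to be unregistered
 **/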
void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
	if (ring->scan_el_attrs)
		sysfs_remove_group(&ring->dev.kobj,
				   ring->scan_el_attrs);

	__iio_free_ring_buffer_access_chrdev(ring);
	__iio_free_ring_buffer_event_chrdev(ring);
	device_del(&ring->dev);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);

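/**
 * iio_read_ring_length() - sysfs read of the ring length in datums
 * @dev:	device associated with the ring buffer
 * @attr:	the sysfs attribute being read
 * @buf:	output buffer
 **/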
ssize_t iio_read_ring_length(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_length)
		len = sprintf(buf, "%d\n",
			      ring->access.get_length(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_length);

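/**
 * iio_write_ring_length() - sysfs write of the ring length in datums
 * @dev:	device associated with the ring buffer
 * @attr:	the sysfs attribute being written
 * @buf:	input buffer containing the requested length
 * @len:	length of the input buffer
 *
 * A write matching the current length is a no-op; otherwise the new
 * length is set and a parameter change is flagged so the implementation
 * can rebuild the ring before its next use.
 **/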
ssize_t iio_write_ring_length(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	ulong val;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (ring->access.get_length)
		if (val == ring->access.get_length(ring))
			return len;

	if (ring->access.set_length) {
		ring->access.set_length(ring, val);
		if (ring->access.mark_param_change)
			ring->access.mark_param_change(ring);
	}

	return len;
}
EXPORT_SYMBOL(iio_write_ring_length);

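/**
 * iio_read_ring_bytes_per_datum() - sysfs read of the datum size in bytes
 * @dev:	device associated with the ring buffer
 * @attr:	the sysfs attribute being read
 * @buf:	output buffer
 **/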
ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_bytes_per_datum)
		len = sprintf(buf, "%d\n",
			      ring->access.get_bytes_per_datum(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);

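/**
 * iio_store_ring_enable() - sysfs enable or disable of the ring buffer
 * @dev:	device associated with the ring buffer
 * @attr:	the sysfs attribute being written
 * @buf:	input buffer; anything other than a leading '0' enables
 * @len:	length of the input buffer
 *
 * Walks the preenable / postenable (or predisable / postdisable)
 * callbacks around the actual mode change, unwinding on failure.
 **/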
ssize_t iio_store_ring_enable(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *dev_info = ring->indio_dev;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-ring, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (ring->preenable) {
			ret = ring->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "ring preenable failed\n");
				goto error_ret;
			}
		}
		if (ring->access.request_update) {
			ret = ring->access.request_update(ring);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "ring parameter update failed\n");
				goto error_ret;
			}
		}
		if (ring->access.mark_in_use)
			ring->access.mark_in_use(ring);
		/* Definitely possible for devices to support both of these. */
		if (dev_info->modes & INDIO_RING_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_RING_TRIGGERED;
		} else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER) {
			dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
		} else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (ring->postenable) {
			ret = ring->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "postenable failed\n");
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				dev_info->currentmode = previous_mode;
				if (ring->postdisable)
					ring->postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (ring->predisable) {
			ret = ring->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (ring->access.unmark_in_use)
			ring->access.unmark_in_use(ring);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (ring->postdisable) {
			ret = ring->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);

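/**
 * iio_show_ring_enable() - sysfs query of the ring buffer enabled state
 * @dev:	device associated with the ring buffer
 * @attr:	the sysfs attribute being read
 * @buf:	output buffer; "1" if any ring mode is active, "0" otherwise
 **/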
ssize_t iio_show_ring_enable(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
				       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);

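/**
 * iio_scan_el_show() - sysfs query of whether a scan element is enabled
 * @dev:	device associated with the ring buffer
 * @attr:	the sysfs attribute of the scan element concerned
 * @buf:	output buffer
 **/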
ssize_t iio_scan_el_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	int ret;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	ret = iio_scan_mask_query(ring, this_el->number);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);

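/**
 * iio_scan_el_store() - sysfs enable or disable of a scan element
 * @dev:	device associated with the ring buffer
 * @attr:	the sysfs attribute of the scan element concerned
 * @buf:	input buffer; anything other than a leading '0' enables
 * @len:	length of the input buffer
 *
 * Rejected with -EBUSY while the device is in triggered ring mode, as
 * the scan mask cannot safely change while capture is running.
 **/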
ssize_t iio_scan_el_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf,
			  size_t len)
{
	int ret = 0;
	bool state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(ring, this_el->number);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(ring, this_el->number);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(ring, this_el->number);
		if (ret)
			goto error_ret;
	}
	if (this_el->set_state)
		ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);

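/**
 * iio_scan_el_ts_show() - sysfs query of whether timestamp capture is enabled
 * @dev:	device associated with the ring buffer
 * @attr:	the sysfs attribute being read
 * @buf:	output buffer
 **/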
ssize_t iio_scan_el_ts_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", ring->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);

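/**
 * iio_scan_el_ts_store() - sysfs enable or disable of timestamp capture
 * @dev:	device associated with the ring buffer
 * @attr:	the sysfs attribute being written
 * @buf:	input buffer; anything other than a leading '0' enables
 * @len:	length of the input buffer
 *
 * As with the scan elements, -EBUSY is returned while the device is in
 * triggered ring mode.
 **/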
ssize_t iio_scan_el_ts_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf,
			     size_t len)
{
	int ret = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	bool state;

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ring->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);