/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "iio.h"
#include "ring_generic.h"
int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
			int event_code, s64 timestamp)
{
	return __iio_push_event(&ring_buf->ev_int, event_code, timestamp,
				&ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);

int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
				     int event_code, s64 timestamp)
{
	if (ring_buf->shared_ev_pointer.ev_p)
		__iio_change_event(ring_buf->shared_ev_pointer.ev_p,
				   event_code, timestamp);
	else
		return iio_push_ring_event(ring_buf, event_code, timestamp);
	return 0;
}
EXPORT_SYMBOL(iio_push_or_escallate_ring_event);
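
/*
 * Example (illustrative sketch, not part of this file): a ring
 * implementation that has just crossed a fill watermark could notify
 * userspace through the escalating helper above. The my_* name is
 * hypothetical and the event code is assumed to come from the IIO
 * headers of this era.
 *
 *	static void my_ring_signal_half_full(struct iio_ring_buffer *ring,
 *					     s64 timestamp)
 *	{
 *		iio_push_or_escallate_ring_event(ring,
 *						 IIO_EVENT_CODE_RING_50_FULL,
 *						 timestamp);
 *	}
 */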
/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
	struct iio_handler *hand
		= container_of(inode->i_cdev, struct iio_handler, chrdev);
	struct iio_ring_buffer *rb = hand->private;

	filp->private_data = hand->private;
	if (rb->access.mark_in_use)
		rb->access.mark_in_use(rb);

	return 0;
}
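
/*
 * As the comment above notes, open() recovers the handler via
 * container_of(), so a concrete ring implementation must embed
 * struct iio_ring_buffer as its first member. A minimal sketch
 * (my_sw_ring is a hypothetical type, not defined in this tree):
 *
 *	struct my_sw_ring {
 *		struct iio_ring_buffer buf;	// must be first
 *		unsigned char *data;		// backing store
 *	};
 */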
/**
 * iio_ring_release() - chrdev file close for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
	struct cdev *cd = inode->i_cdev;
	struct iio_handler *hand = iio_cdev_to_handler(cd);
	struct iio_ring_buffer *rb = hand->private;

	clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
	if (rb->access.unmark_in_use)
		rb->access.unmark_in_use(rb);

	return 0;
}
/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_rip_outer(struct file *filp, char __user *buf,
				  size_t count, loff_t *f_ps)
{
	struct iio_ring_buffer *rb = filp->private_data;
	int ret, dead_offset, copied;
	u8 *data;

	/* rip_lots must exist. */
	if (!rb->access.rip_lots)
		return -EINVAL;
	copied = rb->access.rip_lots(rb, count, &data, &dead_offset);
	if (copied <= 0)
		return copied;
	if (copy_to_user(buf, data + dead_offset, copied)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}
	/* In clever ring buffer designs this may not need to be freed.
	 * When such a design exists I'll add this to ring access funcs.
	 */
	if (rb->access.free_data)
		rb->access.free_data(rb, data);

	return copied;

error_free_data_cpy:
	if (rb->access.free_data)
		rb->access.free_data(rb, data);
	return ret;
}
static const struct file_operations iio_ring_fileops = {
	.read = iio_ring_rip_outer,
	.release = iio_ring_release,
	.open = iio_ring_open,
	.owner = THIS_MODULE,
};
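
/*
 * Userspace view (sketch): a plain read() on the access chrdev lands in
 * iio_ring_rip_outer() above. The device node path below is an
 * assumption; the real name follows the "%s:access%d" pattern used when
 * the access device is registered later in this file.
 *
 *	int fd = open("/dev/mydev0:buffer0:access0", O_RDONLY);
 *	ssize_t n = read(fd, scan_data, sizeof(scan_data));
 */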
/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf: ring buffer whose event chrdev we are allocating
 * @id: id of this ring buffer (typically 0)
 * @owner: the module who owns the ring buffer (for ref counting)
 * @dev: device with which the chrdev is associated
 **/
static int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
				       int id, struct module *owner,
				       struct device *dev)
{
	snprintf(buf->ev_int._name, sizeof(buf->ev_int._name),
		 "%s:event%d", dev_name(&buf->dev), id);
	return iio_setup_ev_int(&(buf->ev_int), buf->ev_int._name,
				owner, dev);
}

static inline void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
	iio_free_ev_int(&(buf->ev_int));
}
static void iio_ring_access_release(struct device *dev)
{
	struct iio_ring_buffer *buf
		= access_dev_to_iio_ring_buffer(dev);

	cdev_del(&buf->access_handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
	.release = iio_ring_access_release,
};
static int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
					int id, struct module *owner)
{
	int ret, minor;

	buf->access_handler.flags = 0;

	buf->access_dev.parent = &buf->dev;
	buf->access_dev.bus = &iio_bus_type;
	buf->access_dev.type = &iio_ring_access_type;
	device_initialize(&buf->access_dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);

	buf->access_id = id;
	dev_set_name(&buf->access_dev, "%s:access%d",
		     dev_name(&buf->dev), buf->access_id);
	ret = device_add(&buf->access_dev);
	if (ret < 0) {
		printk(KERN_ERR "failed to add the ring access dev\n");
		goto error_device_put;
	}

	cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
	buf->access_handler.chrdev.owner = owner;

	ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
	if (ret) {
		printk(KERN_ERR "failed to allocate ring access chrdev\n");
		goto error_device_unregister;
	}
	return 0;

error_device_unregister:
	device_unregister(&buf->access_dev);
error_device_put:
	put_device(&buf->access_dev);
	return ret;
}
static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
	device_unregister(&buf->access_dev);
}
void iio_ring_buffer_init(struct iio_ring_buffer *ring,
			  struct iio_dev *dev_info)
{
	if (ring->access.mark_param_change)
		ring->access.mark_param_change(ring);
	ring->indio_dev = dev_info;
	ring->ev_int.private = ring;
	ring->access_handler.private = ring;
	ring->shared_ev_pointer.ev_p = NULL;
	spin_lock_init(&ring->shared_ev_pointer.lock);
}
EXPORT_SYMBOL(iio_ring_buffer_init);
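
/*
 * Typical driver-side ordering (sketch): initialise the ring against its
 * iio_dev, then register it; unregister on teardown. my_ring_allocate()
 * is a hypothetical allocator standing in for the individual ring
 * implementations' constructors.
 *
 *	ring = my_ring_allocate(indio_dev);
 *	iio_ring_buffer_init(ring, indio_dev);
 *	ret = iio_ring_buffer_register(ring, 0);
 *	if (ret)
 *		goto error_free_ring;
 */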
int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
{
	int ret;

	ring->id = id;
	dev_set_name(&ring->dev, "%s:buffer%d",
		     dev_name(ring->dev.parent),
		     ring->id);
	ret = device_add(&ring->dev);
	if (ret)
		goto error_ret;

	ret = __iio_request_ring_buffer_event_chrdev(ring, 0, ring->owner,
						     &ring->dev);
	if (ret)
		goto error_remove_device;

	ret = __iio_request_ring_buffer_access_chrdev(ring, 0, ring->owner);
	if (ret)
		goto error_free_ring_buffer_event_chrdev;

	if (ring->scan_el_attrs) {
		ret = sysfs_create_group(&ring->dev.kobj,
					 ring->scan_el_attrs);
		if (ret) {
			dev_err(&ring->dev,
				"Failed to add sysfs scan elements\n");
			goto error_free_ring_buffer_event_chrdev;
		}
	}
	return ret;

error_free_ring_buffer_event_chrdev:
	__iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
	device_del(&ring->dev);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);
void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
	if (ring->scan_el_attrs)
		sysfs_remove_group(&ring->dev.kobj,
				   ring->scan_el_attrs);

	__iio_free_ring_buffer_access_chrdev(ring);
	__iio_free_ring_buffer_event_chrdev(ring);
	device_del(&ring->dev);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);
ssize_t iio_read_ring_length(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_length)
		len = sprintf(buf, "%d\n",
			      ring->access.get_length(ring));
	return len;
}
EXPORT_SYMBOL(iio_read_ring_length);
ssize_t iio_write_ring_length(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t len)
{
	int ret;
	ulong val;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (ring->access.get_length)
		if (val == ring->access.get_length(ring))
			return len;

	if (ring->access.set_length) {
		ring->access.set_length(ring, val);
		if (ring->access.mark_param_change)
			ring->access.mark_param_change(ring);
	}
	return len;
}
EXPORT_SYMBOL(iio_write_ring_length);
ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_bytes_per_datum)
		len = sprintf(buf, "%d\n",
			      ring->access.get_bytes_per_datum(ring));
	return len;
}
EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);
ssize_t iio_store_ring_enable(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *dev_info = ring->indio_dev;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-ring, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (ring->preenable) {
			ret = ring->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "ring preenable failed\n");
				goto error_ret;
			}
		}
		if (ring->access.request_update) {
			ret = ring->access.request_update(ring);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "ring parameter update failed\n");
				goto error_ret;
			}
		}
		if (ring->access.mark_in_use)
			ring->access.mark_in_use(ring);
		/* Definitely possible for devices to support both of these. */
		if (dev_info->modes & INDIO_RING_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_RING_TRIGGERED;
		} else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER) {
			dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
		} else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (ring->postenable) {
			ret = ring->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "postenable failed\n");
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				dev_info->currentmode = previous_mode;
				if (ring->postdisable)
					ring->postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (ring->predisable) {
			ret = ring->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (ring->access.unmark_in_use)
			ring->access.unmark_in_use(ring);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (ring->postdisable) {
			ret = ring->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);
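
/*
 * From userspace, enabling capture is a single attribute write (sketch;
 * the sysfs path is an assumption, the attribute itself is registered by
 * the individual drivers): writing '1' walks preenable -> request_update
 * -> mark_in_use -> mode selection -> postenable, and writing '0'
 * reverses it via predisable, unmark_in_use and postdisable.
 *
 *	int fd = open("/sys/bus/iio/devices/device0:buffer0/ring_enable",
 *		      O_WRONLY);
 *	write(fd, "1", 1);	// ends up in iio_store_ring_enable()
 */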
ssize_t iio_show_ring_enable(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
				       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);
ssize_t iio_scan_el_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	int ret;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	ret = iio_scan_mask_query(ring, this_el->number);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);
ssize_t iio_scan_el_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t len)
{
	int ret = 0;
	bool state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(ring, this_el->number);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(ring, this_el->number);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(ring, this_el->number);
		if (ret)
			goto error_ret;
	}
	if (this_el->set_state)
		ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);
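
/*
 * Scan element selection follows the same pattern (sketch): userspace
 * writes '1' or '0' to a per-channel scan element attribute while the
 * buffer is disabled. The -EBUSY above means the scan mask can only be
 * changed while the device is out of triggered ring mode.
 *
 *	write(scan_el_fd, "1", 1);	// scan_el_fd: an already-open
 *					// scan element attribute (assumed)
 */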
ssize_t iio_scan_el_ts_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", ring->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);
ssize_t iio_scan_el_ts_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t len)
{
	int ret = 0;
	bool state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ring->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);