/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}
static bool iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	do {
		if (!iio_buffer_data_available(rb)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(rb->pollq,
					iio_buffer_data_available(rb) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_data_available(rb))
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);
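
/*
 * Example (a hypothetical driver-side sketch, not part of this file):
 * per the comment on iio_buffer_read_first_n_outer(), implementations
 * embed a struct iio_buffer as their first member so the core can treat
 * a pointer to either structure interchangeably, and they initialize it
 * with iio_buffer_init(). Names prefixed "my_" are illustrative only.
 *
 *	struct my_buffer {
 *		struct iio_buffer buffer;	(must be the first member)
 *		int some_driver_state;
 *	};
 *
 *	static struct iio_buffer *my_buffer_alloc(void)
 *	{
 *		struct my_buffer *mb = kzalloc(sizeof(*mb), GFP_KERNEL);
 *
 *		if (!mb)
 *			return NULL;
 *		iio_buffer_init(&mb->buffer);
 *		mb->buffer.access = &my_buffer_access_funcs;
 *		return &mb->buffer;
 *	}
 */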
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.repeat,
			       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.shift);
}
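
/*
 * For example, a signed 12-bit little-endian sample stored in 16 bits
 * with a 4-bit shift is reported by iio_show_fixed_type() above as
 * "le:s12/16>>4"; with scan_type.repeat = 2 it would read
 * "le:s12/16X2>>4".
 */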
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index", chan,
				     &iio_show_scan_index, NULL,
				     0, IIO_SEPARATE, &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type", chan,
				     &iio_show_fixed_type, NULL,
				     0, IIO_SEPARATE, &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en", chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index, IIO_SEPARATE,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en", chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index, IIO_SEPARATE,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	return attrcount;
}
static const char * const iio_scan_elements_group_name = "scan_elements";
int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs,
		       buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
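
/*
 * Example (a hypothetical driver-side sketch): a channel table suitable
 * for iio_buffer_register(). scan_index fixes the channel's position in
 * the scan, scan_type describes the sample layout; "my_channels" is
 * illustrative only.
 *
 *	static const struct iio_chan_spec my_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 0,
 *			.scan_index = 0,
 *			.scan_type = {
 *				.sign = 's',
 *				.realbits = 12,
 *				.storagebits = 16,
 *				.shift = 4,
 *				.endianness = IIO_LE,
 *			},
 *		},
 *		IIO_CHAN_SOFT_TIMESTAMP(1),
 *	};
 *
 *	ret = iio_buffer_register(indio_dev, my_channels,
 *				  ARRAY_SIZE(my_channels));
 */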
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);
ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);
ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
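
/*
 * Example (a hypothetical driver-side sketch): a device that can sample
 * either channels 0-1 or channels 0-3 advertises those combinations as
 * a zero-terminated array of masks, in order of preference, which
 * iio_scan_mask_match() walks above:
 *
 *	static const unsigned long my_scan_masks[] = {0x3, 0xf, 0};
 *
 *	indio_dev->available_scan_masks = my_scan_masks;
 */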
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
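
/*
 * Worked example for the computation above: two enabled 16-bit channels
 * plus a 64-bit timestamp. The samples pack into bytes 0-1 and 2-3, the
 * timestamp must be 8-byte aligned, so bytes 4-7 are padding and the
 * timestamp occupies bytes 8-15: iio_compute_scan_bytes() returns 16.
 */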
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}
static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				return -EINVAL;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);

	return ret;
}
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
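
/*
 * Example (a hypothetical in-kernel consumer sketch): attach an extra
 * buffer to a device and detach it again when done; "my_buffer" is
 * illustrative only.
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);
 *	if (ret)
 *		return ret;
 *	...
 *	iio_update_buffers(indio_dev, NULL, my_buffer);
 */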
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
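
/*
 * Example (a hypothetical driver-side sketch): a device that can only
 * sample one channel at a time plugs this helper into its buffer setup
 * ops; "my_" names are illustrative only.
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.preenable = my_buffer_preenable,
 *		.postdisable = my_buffer_postdisable,
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 */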
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask) *
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}
static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
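
/*
 * Example (a hypothetical driver-side sketch): a trigger bottom half
 * that reads one scan worth of data and pushes it to all attached
 * buffers; "my_" names are illustrative only.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 scan[32];
 *
 *		my_read_scan(indio_dev, scan);
 *		iio_push_to_buffers(indio_dev, scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */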
static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		/* Extend the previous entry rather than adding a new one */
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			/* Account for the storage of the skipped channel */
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			if (ch->scan_type.repeat > 1)
				length = ch->scan_type.storagebits / 8 *
					ch->scan_type.repeat;
			else
				length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
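
/*
 * Worked example for the demux construction above: the device scans
 * 16-bit channels 0, 1 and 2 but this buffer only wants 0 and 2. The
 * first memcpy op copies 2 bytes from offset 0 to offset 0; channel 1
 * is skipped, so the second op copies 2 bytes from offset 4 to offset
 * 2. The two ops are not merged because the source range is not
 * contiguous.
 */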
int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}
/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);
/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
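
/*
 * Example: code that keeps a long-lived pointer to a buffer takes its
 * own reference and drops it when finished; both helpers tolerate NULL.
 *
 *	my_state->buffer = iio_buffer_get(indio_dev->buffer);
 *	...
 *	iio_buffer_put(my_state->buffer);
 */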