/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or ret != 0
 *	   for ending the reading activity
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}
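/*
 * Example (illustrative sketch, not part of this file): userspace consumes
 * the buffer by read()ing the character device. Assuming a hypothetical
 * device numbered 0 with a 16-byte scan:
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char scan[16];
 *
 *	// Blocks until up to the watermark's worth of data is available,
 *	// unless the device was opened with O_NONBLOCK.
 *	ssize_t n = read(fd, scan, sizeof(scan));
 */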
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return POLLIN | POLLRDNORM;
	return 0;
}
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);
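/*
 * Example (illustrative sketch): a buffer implementation embeds a struct
 * iio_buffer as its first member and calls iio_buffer_init() right after
 * allocation; the "foo" names here are hypothetical:
 *
 *	struct foo_buffer {
 *		struct iio_buffer buffer;	// must be the first member
 *		...
 *	};
 *
 *	struct foo_buffer *fb = kzalloc(sizeof(*fb), GFP_KERNEL);
 *
 *	if (fb)
 *		iio_buffer_init(&fb->buffer);	// lists, waitqueue, kref,
 *						// default watermark of 1
 */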
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.repeat,
		       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
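/*
 * For example (a worked instance of the format above): a little-endian,
 * signed, 12-bit sample stored in 16 bits and shifted right by 4 reads back
 * as "le:s12/16>>4"; with a repeat of 2 it reads back as "le:s12/16X2>>4".
 */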
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}
/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask,
					  bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
				  sizeof(*trialmask),
				  GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     IIO_SEPARATE,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     IIO_SEPARATE,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}
static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	return iio_storage_bytes_for_si(indio_dev,
					indio_dev->scan_index_timestamp);
}
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
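/*
 * For example (a worked instance with hypothetical channel sizes): two
 * enabled channels of 2 storage bytes each pack to bytes = 4; a following
 * 8-byte timestamp first ALIGN()s that up to 8 and then adds its own 8
 * bytes, giving a total scan size of 16 bytes.
 */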
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		kfree(mask);
}
struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};
static int iio_verify_update(struct iio_dev *indio_dev,
	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
	struct iio_device_config *config)
{
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&indio_dev->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&indio_dev->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
				    indio_dev->masklength,
				    compound_mask,
				    strict_scanmask);
		kfree(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
				    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}
static int iio_enable_buffers(struct iio_dev *indio_dev,
	struct iio_device_config *config)
{
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	indio_dev->currentmode = config->mode;

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_disable_buffers;
		}
	}

	return 0;

err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->active_scan_mask = NULL;

	return ret;
}
static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	indio_dev->currentmode = INDIO_DIRECT_MODE;

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;

	return ret;
}
static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
		&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state. With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
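/*
 * Example (illustrative sketch): an in-kernel consumer attaches a buffer it
 * has allocated and detaches it again when done; the "foo" name is
 * hypothetical:
 *
 *	ret = iio_update_buffers(indio_dev, foo->buffer, NULL);
 *	if (ret)
 *		return ret;
 *	...
 *	iio_update_buffers(indio_dev, NULL, foo->buffer);
 */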
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
static const char * const iio_scan_elements_group_name = "scan_elements";
static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
	S_IRUGO, iio_buffer_show_watermark, NULL);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
};
int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;
	const struct iio_chan_spec *channels;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!buffer)
		return 0;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(indio_dev->buffer->buffer_group.attrs);

	return ret;
}
void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->buffer_group.attrs);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
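/*
 * Example (illustrative sketch): a driver whose hardware can only sample one
 * channel at a time plugs this helper into its buffer setup ops; the "foo"
 * names are hypothetical:
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 */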
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
	return 0;
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
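/*
 * Example (illustrative sketch): iio_push_to_buffers() is typically called
 * from a trigger poll function once a complete scan has been assembled; the
 * "foo" names are hypothetical:
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		foo_read_scan(st, st->scan);	// hypothetical helper
 *		iio_push_to_buffers(indio_dev, st->scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *
 *		return IRQ_HANDLED;
 *	}
 */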
static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	/* Coalesce with the previous entry if the copy regions are adjacent */
	if (*p && (*p)->from + (*p)->length == in_loc &&
		(*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			/*
			 * Account for the skipped channel's own storage size
			 * before advancing to the next set bit.
			 */
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
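/*
 * For example (a worked instance): with an active scan mask covering
 * channels {0, 1} and a buffer that enabled only channel 1, the update
 * above builds a one-entry demux table that copies channel 1's bytes from
 * their offset within the full scan to offset 0 of the bounce buffer.
 */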
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);