#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
-#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/slab.h>
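+ /* Chrdev read for ring buffer access: hand the userspace buffer straight
+  * down so the ring implementation can copy data out without an extra
+  * bounce buffer in this layer. */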
static ssize_t iio_ring_rip_outer(struct file *filp, char __user *buf,
size_t count, loff_t *f_ps)
{
struct iio_ring_buffer *rb = filp->private_data;
- int ret, dead_offset, copied;
- u8 *data;
+ int ret, dead_offset;
+
/* The rip_lots access function must exist. */
if (!rb->access.rip_lots)
return -EINVAL;
- copied = rb->access.rip_lots(rb, count, &data, &dead_offset);
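+ /* rip_lots now copies directly into the user buffer and returns the
+  * number of bytes copied or a negative errno. */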
+ ret = rb->access.rip_lots(rb, count, buf, &dead_offset);
- if (copied <= 0) {
- ret = copied;
- goto error_ret;
- }
- if (copy_to_user(buf, data + dead_offset, copied)) {
- ret = -EFAULT;
- goto error_free_data_cpy;
- }
- /* In clever ring buffer designs this may not need to be freed.
- * When such a design exists I'll add this to ring access funcs.
- */
- kfree(data);
-
- return copied;
-
-error_free_data_cpy:
- kfree(data);
-error_ret:
return ret;
}

#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
+#include <linux/uaccess.h>
#include "ring_sw.h"
#include "trigger.h"
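+ /* Rip data out of the software ring buffer, copying it directly to the
+  * userspace buffer supplied by the chrdev read. */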
int iio_rip_sw_rb(struct iio_ring_buffer *r,
- size_t count, u8 **data, int *dead_offset)
+ size_t count, char __user *buf, int *dead_offset)
{
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
+ u8 *data;
int ret, max_copied;
int bytes_to_rip;
/* Limit size to whole of ring buffer */
bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length), count);
- *data = kmalloc(bytes_to_rip, GFP_KERNEL);
- if (*data == NULL) {
+ data = kmalloc(bytes_to_rip, GFP_KERNEL);
+ if (data == NULL) {
ret = -ENOMEM;
goto error_ret;
}
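/* initial_read_p and initial_write_p hold consistent snapshots of
 * ring->read_p and ring->write_p, taken in unchanged code above.
 * Three cases follow: the whole request is available, only a short
 * read is possible, or the read wraps past the end of the buffer. */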
if (initial_write_p >= initial_read_p + bytes_to_rip) {
/* write_p is greater than necessary, all is easy */
max_copied = bytes_to_rip;
- memcpy(*data, initial_read_p, max_copied);
+ memcpy(data, initial_read_p, max_copied);
end_read_p = initial_read_p + max_copied;
} else if (initial_write_p > initial_read_p) {
/* not enough data to copy */
max_copied = initial_write_p - initial_read_p;
- memcpy(*data, initial_read_p, max_copied);
+ memcpy(data, initial_read_p, max_copied);
end_read_p = initial_write_p;
} else {
/* going through 'end' of ring buffer */
max_copied = ring->data + ring->buf.length*ring->buf.bytes_per_datum - initial_read_p;
- memcpy(*data, initial_read_p, max_copied);
+ memcpy(data, initial_read_p, max_copied);
/* possible we are done if we align precisely with end */
if (max_copied == bytes_to_rip)
end_read_p = ring->data;
else if (initial_write_p > ring->data + bytes_to_rip - max_copied) {
/* enough data to finish */
- memcpy(*data + max_copied, ring->data, bytes_to_rip - max_copied);
+ memcpy(data + max_copied, ring->data, bytes_to_rip - max_copied);
/* compute the new read position before max_copied is overwritten */
end_read_p = ring->data + (bytes_to_rip - max_copied);
max_copied = bytes_to_rip;
} else { /* not enough data */
- memcpy(*data + max_copied, ring->data, initial_write_p - ring->data);
+ memcpy(data + max_copied, ring->data, initial_write_p - ring->data);
max_copied += initial_write_p - ring->data;
end_read_p = initial_write_p;
}
}
/* (unchanged code here snapshots ring->read_p and computes *dead_offset:
 * how far the read pointer was pushed past initial_read_p by concurrent
 * fills while we were copying) */
/* Force the read pointer to end_read_p, retrying if a concurrent fill
 * moves it mid-update. */
while (ring->read_p != end_read_p)
ring->read_p = end_read_p;
- return max_copied - *dead_offset;
+ ret = max_copied - *dead_offset;
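+ /* Copy everything past the dead region out to userspace. */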
+ if (copy_to_user(buf, data + *dead_offset, ret)) {
+ ret = -EFAULT;
+ goto error_free_data_cpy;
+ }
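+ /* Success falls through as well: the temporary buffer is always freed. */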
error_free_data_cpy:
- kfree(*data);
+ kfree(data);
error_ret:
+
return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);