/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
+ * Make the buffer large enough for one 16 bit sample followed by a
+ * 64 bit timestamp at a 64 bit aligned offset.
*/
- unsigned char data[2] ____cacheline_aligned;
+ unsigned char data[ALIGN(2, sizeof(s64)) + sizeof(s64)]
+ ____cacheline_aligned;
};
enum ad7476_supported_device_ids {
struct iio_dev *indio_dev = pf->indio_dev;
struct ad7476_state *st = iio_priv(indio_dev);
s64 time_ns;
- __u8 *rxbuf;
int b_sent;
- rxbuf = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (rxbuf == NULL)
- goto done;
-
- b_sent = spi_read(st->spi, rxbuf,
- st->chip_info->channel[0].scan_type.storagebits / 8);
+ b_sent = spi_sync(st->spi, &st->msg);
if (b_sent < 0)
goto done;
time_ns = iio_get_time_ns();
if (indio_dev->scan_timestamp)
- memcpy(rxbuf + indio_dev->scan_bytes - sizeof(s64),
- &time_ns, sizeof(time_ns));
+ ((s64 *)st->data)[1] = time_ns;
- iio_push_to_buffer(indio_dev->buffer, rxbuf);
+ iio_push_to_buffer(indio_dev->buffer, st->data);
done:
iio_trigger_notify_done(indio_dev->trig);
- kfree(rxbuf);
return IRQ_HANDLED;
}
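
For readers tracing where &st->msg comes from: the switch to spi_sync() relies on a spi_transfer/spi_message pair prepared once, ahead of the interrupt path, and that hunk is not part of the excerpt above. The sketch below shows what such probe-time setup could look like; the struct name, the helper name and the xfer/msg field names are assumptions made for illustration, not lines taken from the patch.

#include <linux/cache.h>	/* ____cacheline_aligned */
#include <linux/kernel.h>	/* ALIGN() */
#include <linux/spi/spi.h>
#include <linux/types.h>	/* s64 */

/* Hypothetical mirror of the patched state struct. */
struct ad7476_state_sketch {
	struct spi_device	*spi;
	struct spi_transfer	xfer;
	struct spi_message	msg;
	/*
	 * ALIGN(2, sizeof(s64)) == 8, so the whole buffer is 16 bytes and
	 * ((s64 *)data)[1] writes the timestamp to the 64 bit aligned
	 * slot at byte offset 8, clear of the 16 bit sample at offset 0.
	 */
	unsigned char		data[ALIGN(2, sizeof(s64)) + sizeof(s64)]
					____cacheline_aligned;
};

/* Done once at probe time; the IRQ handler then only calls spi_sync(). */
static void ad7476_sketch_setup_msg(struct ad7476_state_sketch *st)
{
	st->xfer.rx_buf = st->data;
	st->xfer.len = 2;	/* storagebits / 8 for one 16 bit sample */

	spi_message_init(&st->msg);
	spi_message_add_tail(&st->xfer, &st->msg);
}

Keeping the message preallocated is what lets the handler run from the trigger path without any per-interrupt allocation, which is the point of dropping the kzalloc()/kfree() pair above.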
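The size and offset arithmetic can also be checked outside the kernel. The stand-alone userspace program below is illustrative only; ALIGN is redefined locally to mirror the kernel macro, and _Alignas stands in for ____cacheline_aligned.

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* as in the kernel */
typedef int64_t s64;

int main(void)
{
	/* Same size expression as the new data[] member in the patch. */
	_Alignas(s64) unsigned char data[ALIGN(2, sizeof(s64)) + sizeof(s64)] = { 0 };

	/* Same store the new trigger handler performs. */
	((s64 *)data)[1] = 1234;

	printf("buffer size       : %zu bytes\n", sizeof(data));		/* 16 */
	printf("timestamp offset  : %zu bytes\n", ALIGN(2, sizeof(s64)));	/* 8  */
	printf("timestamp readback: %lld\n", (long long)((s64 *)data)[1]);
	return 0;
}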