1 #include <linux/interrupt.h>
3 #include <linux/gpio.h>
4 #include <linux/mutex.h>
5 #include <linux/device.h>
6 #include <linux/kernel.h>
7 #include <linux/spi/spi.h>
8 #include <linux/sysfs.h>
9 #include <linux/slab.h>
13 #include "../ring_sw.h"
14 #include "../kfifo_buf.h"
16 #include "../trigger.h"
17 #include "lis3l02dq.h"
20 * combine_8_to_16() utility function to munge to u8s into u16
22 static inline u16 combine_8_to_16(u8 lower, u8 upper)
26 return _lower | (_upper << 8);
30 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
32 irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
34 struct iio_dev *indio_dev = private;
35 struct iio_sw_ring_helper_state *h = iio_dev_get_devdata(indio_dev);
36 struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
39 iio_trigger_poll(st->trig, iio_get_time_ns());
42 return IRQ_WAKE_THREAD;
46 * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
48 ssize_t lis3l02dq_read_accel_from_ring(struct iio_ring_buffer *ring,
54 if (!iio_scan_mask_query(ring, index))
57 data = kmalloc(ring->access->get_bytes_per_datum(ring),
62 ret = ring->access->read_last(ring, (u8 *)data);
65 *val = data[iio_scan_mask_count_to_right(ring, index)];
71 static const u8 read_all_tx_array[] = {
72 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
73 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
74 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
75 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
76 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
77 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
81 * lis3l02dq_read_all() Reads all channels currently selected
82 * @st: device specific state
83 * @rx_array: (dma capable) receive array, must be at least
84 * 4*number of channels
86 static int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
88 struct iio_ring_buffer *ring = st->help.indio_dev->ring;
89 struct spi_transfer *xfers;
90 struct spi_message msg;
93 xfers = kzalloc((ring->scan_count) * 2
94 * sizeof(*xfers), GFP_KERNEL);
98 mutex_lock(&st->buf_lock);
100 for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
101 if (ring->scan_mask & (1 << i)) {
103 xfers[j].tx_buf = st->tx + 2*j;
104 st->tx[2*j] = read_all_tx_array[i*4];
107 xfers[j].rx_buf = rx_array + j*2;
108 xfers[j].bits_per_word = 8;
110 xfers[j].cs_change = 1;
114 xfers[j].tx_buf = st->tx + 2*j;
115 st->tx[2*j] = read_all_tx_array[i*4 + 2];
118 xfers[j].rx_buf = rx_array + j*2;
119 xfers[j].bits_per_word = 8;
121 xfers[j].cs_change = 1;
125 /* After these are transmitted, the rx_buff should have
126 * values in alternate bytes
128 spi_message_init(&msg);
129 for (j = 0; j < ring->scan_count * 2; j++)
130 spi_message_add_tail(&xfers[j], &msg);
132 ret = spi_sync(st->us, &msg);
133 mutex_unlock(&st->buf_lock);
139 static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
141 struct iio_poll_func *pf = p;
142 struct iio_dev *indio_dev = pf->private_data;
143 struct iio_sw_ring_helper_state *h = iio_dev_get_devdata(indio_dev);
145 h->last_timestamp = pf->timestamp;
146 iio_sw_trigger_to_ring(h);
151 static int lis3l02dq_get_ring_element(struct iio_sw_ring_helper_state *h,
156 s16 *data = (s16 *)buf;
158 rx_array = kzalloc(4 * (h->indio_dev->ring->scan_count), GFP_KERNEL);
159 if (rx_array == NULL)
161 ret = lis3l02dq_read_all(lis3l02dq_h_to_s(h), rx_array);
164 for (i = 0; i < h->indio_dev->ring->scan_count; i++)
165 data[i] = combine_8_to_16(rx_array[i*4+1],
169 return i*sizeof(data[0]);
172 /* Caller responsible for locking as necessary. */
/*
 * __lis3l02dq_write_data_ready_config() - switch data ready generation
 * on or off in the CTRL_2 register to match @state.
 *
 * Reads CTRL_2, compares the current data-ready bit with the requested
 * @state and only writes the register when a change is needed, keeping
 * st->trigger_on in sync.  Returns 0 or a negative errno (error paths
 * fall outside the extracted lines shown here).
 */
174 __lis3l02dq_write_data_ready_config(struct device *dev, bool state)
179 struct iio_dev *indio_dev = dev_get_drvdata(dev);
180 struct iio_sw_ring_helper_state *h
181 = iio_dev_get_devdata(indio_dev);
182 struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
184 /* Get the current event mask register */
185 ret = lis3l02dq_spi_read_reg_8(indio_dev,
186 LIS3L02DQ_REG_CTRL_2_ADDR,
190 /* Find out if data ready is already on */
/* currentlyset: true when the data-ready bit is already set in CTRL_2 */
192 = valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
194 /* Disable requested */
195 if (!state && currentlyset) {
196 /* disable the data ready signal */
197 valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
199 /* The double write is to overcome a hardware bug? */
200 ret = lis3l02dq_spi_write_reg_8(indio_dev,
201 LIS3L02DQ_REG_CTRL_2_ADDR,
205 ret = lis3l02dq_spi_write_reg_8(indio_dev,
206 LIS3L02DQ_REG_CTRL_2_ADDR,
/* Track software-side view of the trigger state */
210 st->trigger_on = false;
211 /* Enable requested */
212 } else if (state && !currentlyset) {
213 /* if not set, enable requested */
214 /* first disable all events */
215 ret = lis3l02dq_disable_all_events(indio_dev);
/* NOTE(review): line above this continuation (dropped in extraction)
 * presumably ORs the disable_all_events() result into the new register
 * value — confirm against the full source. */
220 LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
222 st->trigger_on = true;
223 ret = lis3l02dq_spi_write_reg_8(indio_dev,
224 LIS3L02DQ_REG_CTRL_2_ADDR,
236 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
238 * If disabling the interrupt also does a final read to ensure it is clear.
239 * This is only important in some cases where the scan enable elements are
240 * switched before the ring is reenabled.
242 static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
245 struct lis3l02dq_state *st = trig->private_data;
249 __lis3l02dq_write_data_ready_config(&st->help.indio_dev->dev, state);
250 if (state == false) {
252 * A possible quirk with teh handler is currently worked around
253 * by ensuring outstanding read events are cleared.
255 ret = lis3l02dq_read_all(st, NULL);
257 lis3l02dq_spi_read_reg_8(st->help.indio_dev,
258 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
264 * lis3l02dq_trig_try_reen() try renabling irq for data rdy trigger
265 * @trig: the datardy trigger
267 static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
269 struct lis3l02dq_state *st = trig->private_data;
272 /* If gpio still high (or high again) */
273 /* In theory possible we will need to do this several times */
274 for (i = 0; i < 5; i++)
275 if (gpio_get_value(irq_to_gpio(st->us->irq)))
276 lis3l02dq_read_all(st, NULL);
281 "Failed to clear the interrupt for lis3l02dq\n");
283 /* irq reenabled so success! */
287 int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
290 struct iio_sw_ring_helper_state *h
291 = iio_dev_get_devdata(indio_dev);
292 struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
294 st->trig = iio_allocate_trigger("lis3l02dq-dev%d", indio_dev->id);
300 st->trig->dev.parent = &st->us->dev;
301 st->trig->owner = THIS_MODULE;
302 st->trig->private_data = st;
303 st->trig->set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state;
304 st->trig->try_reenable = &lis3l02dq_trig_try_reen;
305 ret = iio_trigger_register(st->trig);
307 goto error_free_trig;
312 iio_free_trigger(st->trig);
317 void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
319 struct iio_sw_ring_helper_state *h
320 = iio_dev_get_devdata(indio_dev);
321 struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
323 iio_trigger_unregister(st->trig);
324 iio_free_trigger(st->trig);
327 void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
329 kfree(indio_dev->pollfunc->name);
330 kfree(indio_dev->pollfunc);
331 lis3l02dq_free_buf(indio_dev->ring);
/*
 * lis3l02dq_ring_postenable() - restrict the axes enabled in CTRL_1 to
 * the channels in the current scan mask before handing over to the
 * generic triggered ring postenable.
 */
334 static int lis3l02dq_ring_postenable(struct iio_dev *indio_dev)
336 /* Disable unwanted channels otherwise the interrupt will not clear */
339 bool oneenabled = false;
/* Read current CTRL_1 so untouched bits are preserved on write back */
341 ret = lis3l02dq_spi_read_reg_8(indio_dev,
342 LIS3L02DQ_REG_CTRL_1_ADDR,
/* Scan element 0 -> X axis */
347 if (iio_scan_mask_query(indio_dev->ring, 0)) {
348 t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
351 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
/* Scan element 1 -> Y axis */
352 if (iio_scan_mask_query(indio_dev->ring, 1)) {
353 t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
356 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
/* Scan element 2 -> Z axis */
357 if (iio_scan_mask_query(indio_dev->ring, 2)) {
358 t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
361 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
363 if (!oneenabled) /* what happens in this case is unknown */
365 ret = lis3l02dq_spi_write_reg_8(indio_dev,
366 LIS3L02DQ_REG_CTRL_1_ADDR,
/* Generic software-ring postenable (attaches pollfunc to trigger) */
371 return iio_triggered_ring_postenable(indio_dev);
376 /* Turn all channels on again */
/*
 * lis3l02dq_ring_predisable() - undo the axis masking applied by
 * lis3l02dq_ring_postenable(): first run the generic triggered ring
 * predisable, then re-enable X, Y and Z in CTRL_1.
 */
377 static int lis3l02dq_ring_predisable(struct iio_dev *indio_dev)
382 ret = iio_triggered_ring_predisable(indio_dev);
/* Read-modify-write CTRL_1 so non-axis bits are preserved */
386 ret = lis3l02dq_spi_read_reg_8(indio_dev,
387 LIS3L02DQ_REG_CTRL_1_ADDR,
391 t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
392 LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
393 LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
395 ret = lis3l02dq_spi_write_reg_8(indio_dev,
396 LIS3L02DQ_REG_CTRL_1_ADDR,
403 static const struct iio_ring_setup_ops lis3l02dq_ring_setup_ops = {
404 .preenable = &iio_sw_ring_preenable,
405 .postenable = &lis3l02dq_ring_postenable,
406 .predisable = &lis3l02dq_ring_predisable,
/*
 * lis3l02dq_configure_ring() - allocate and wire up the software ring
 * buffer and the poll function for triggered capture.
 *
 * Counterpart of lis3l02dq_unconfigure_ring().  Returns 0 on success or
 * a negative errno (error paths fall on lines dropped from this
 * extraction; the trailing label frees the ring on pollfunc failure).
 */
409 int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
412 struct iio_sw_ring_helper_state *h = iio_dev_get_devdata(indio_dev);
413 struct iio_ring_buffer *ring;
/* Hook used by the sw ring helper to pull one scan from the device */
415 h->get_ring_element = &lis3l02dq_get_ring_element;
417 ring = lis3l02dq_alloc_buf(indio_dev);
421 indio_dev->ring = ring;
422 /* Effectively select the ring buffer implementation */
423 indio_dev->ring->access = &lis3l02dq_access_funcs;
426 ring->scan_timestamp = true;
427 ring->setup_ops = &lis3l02dq_ring_setup_ops;
428 ring->owner = THIS_MODULE;
430 /* Set default scan mode */
/* All three axes (scan elements 0..2) enabled by default */
431 iio_scan_mask_set(ring, 0);
432 iio_scan_mask_set(ring, 1);
433 iio_scan_mask_set(ring, 2);
435 /* Functions are NULL as we set handler below */
436 indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
438 if (indio_dev->pollfunc == NULL) {
440 goto error_iio_sw_rb_free;
/* Top half stores the timestamp; bottom half fills the ring */
442 indio_dev->pollfunc->private_data = indio_dev;
443 indio_dev->pollfunc->thread = &lis3l02dq_trigger_handler;
444 indio_dev->pollfunc->h = &iio_pollfunc_store_time;
445 indio_dev->pollfunc->type = 0;
/* Name freed in lis3l02dq_unconfigure_ring() */
446 indio_dev->pollfunc->name
447 = kasprintf(GFP_KERNEL, "lis3l02dq_consumer%d", indio_dev->id);
449 indio_dev->modes |= INDIO_RING_TRIGGERED;
452 error_iio_sw_rb_free:
453 lis3l02dq_free_buf(indio_dev->ring);