drivers/staging/iio/accel/lis3l02dq_ring.c
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>		/* kmalloc()/kzalloc()/kfree() */
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
#include <linux/list.h>

#include "../iio.h"
#include "../sysfs.h"
#include "../ring_sw.h"
#include "accel.h"
#include "../trigger.h"
#include "lis3l02dq.h"

/**
 * combine_8_to_16() utility function to munge two u8s into a u16
 **/
static inline u16 combine_8_to_16(u8 lower, u8 upper)
{
        u16 _lower = lower;
        u16 _upper = upper;
        return _lower | (_upper << 8);
}
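/* Illustrative example (editor's note): combine_8_to_16(0x34, 0x12) == 0x1234 */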

/**
 * lis3l02dq_scan_el_set_state() set whether a scan contains a given channel
 * @scan_el:    associated iio scan element attribute
 * @indio_dev:  the device structure
 * @state:      desired state
 *
 * mlock already held when this is called.
 **/
static int lis3l02dq_scan_el_set_state(struct iio_scan_el *scan_el,
                                       struct iio_dev *indio_dev,
                                       bool state)
{
        u8 t, mask;
        int ret;

        ret = lis3l02dq_spi_read_reg_8(&indio_dev->dev,
                                       LIS3L02DQ_REG_CTRL_1_ADDR,
                                       &t);
        if (ret)
                goto error_ret;
        switch (scan_el->label) {
        case LIS3L02DQ_REG_OUT_X_L_ADDR:
                mask = LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
                break;
        case LIS3L02DQ_REG_OUT_Y_L_ADDR:
                mask = LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
                break;
        case LIS3L02DQ_REG_OUT_Z_L_ADDR:
                mask = LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
                break;
        default:
                ret = -EINVAL;
                goto error_ret;
        }

        /* Only write CTRL_1 if the enable bit actually needs to change. */
        if (!!(t & mask) != state) {
                if (state)
                        t |= mask;
                else
                        t &= ~mask;
                ret = lis3l02dq_spi_write_reg_8(&indio_dev->dev,
                                                LIS3L02DQ_REG_CTRL_1_ADDR,
                                                &t);
        }
error_ret:
        return ret;
}

static IIO_SCAN_EL_C(accel_x, LIS3L02DQ_SCAN_ACC_X, IIO_SIGNED(16),
                     LIS3L02DQ_REG_OUT_X_L_ADDR,
                     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_C(accel_y, LIS3L02DQ_SCAN_ACC_Y, IIO_SIGNED(16),
                     LIS3L02DQ_REG_OUT_Y_L_ADDR,
                     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_C(accel_z, LIS3L02DQ_SCAN_ACC_Z, IIO_SIGNED(16),
                     LIS3L02DQ_REG_OUT_Z_L_ADDR,
                     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_TIMESTAMP;

static struct attribute *lis3l02dq_scan_el_attrs[] = {
        &iio_scan_el_accel_x.dev_attr.attr,
        &iio_scan_el_accel_y.dev_attr.attr,
        &iio_scan_el_accel_z.dev_attr.attr,
        &iio_scan_el_timestamp.dev_attr.attr,
        NULL,
};

static struct attribute_group lis3l02dq_scan_el_group = {
        .attrs = lis3l02dq_scan_el_attrs,
        .name = "scan_elements",
};

/**
 * lis3l02dq_poll_func_th() top half interrupt handler called by trigger
 * @indio_dev:  the IIO device being polled
 **/
static void lis3l02dq_poll_func_th(struct iio_dev *indio_dev)
{
        struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);

        st->last_timestamp = indio_dev->trig->timestamp;
        schedule_work(&st->work_trigger_to_ring);
        /* Indicate that this interrupt is being handled */

        /* Technically this is trigger related, but without this
         * handler running there is currently no way for the interrupt
         * to clear.
         */
        st->inter = 1;
}

/**
 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
 **/
static int lis3l02dq_data_rdy_trig_poll(struct iio_dev *dev_info,
                                        int index,
                                        s64 timestamp,
                                        int no_test)
{
        struct lis3l02dq_state *st = iio_dev_get_devdata(dev_info);
        struct iio_trigger *trig = st->trig;

        trig->timestamp = timestamp;
        iio_trigger_poll(trig);

        return IRQ_HANDLED;
}

/* This is an event as it is a response to a physical interrupt */
IIO_EVENT_SH(data_rdy_trig, &lis3l02dq_data_rdy_trig_poll);

/**
 * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
 **/
ssize_t lis3l02dq_read_accel_from_ring(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct iio_scan_el *el = NULL;
        int ret, len = 0, i = 0;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        struct iio_dev *dev_info = dev_get_drvdata(dev);
        s16 *data;

        while (dev_info->scan_el_attrs->attrs[i]) {
                el = to_iio_scan_el((struct device_attribute *)
                                    (dev_info->scan_el_attrs->attrs[i]));
                /* label is in fact the address */
                if (el->label == this_attr->address)
                        break;
                i++;
        }
        if (!dev_info->scan_el_attrs->attrs[i]) {
                ret = -EINVAL;
                goto error_ret;
        }
        /* If this element is in the scan mask */
        ret = iio_scan_mask_query(dev_info, el->number);
        if (ret < 0)
                goto error_ret;
        if (ret) {
                data = kmalloc(dev_info->ring->access.get_bpd(dev_info->ring),
                               GFP_KERNEL);
                if (data == NULL)
                        return -ENOMEM;
                ret = dev_info->ring->access.read_last(dev_info->ring,
                                                       (u8 *)data);
                if (ret)
                        goto error_free_data;
        } else {
                ret = -EINVAL;
                goto error_ret;
        }
        len = iio_scan_mask_count_to_right(dev_info, el->number);
        if (len < 0) {
                ret = len;
                goto error_free_data;
        }
        len = sprintf(buf, "ring %d\n", data[len]);
error_free_data:
        kfree(data);
error_ret:
        return ret ? ret : len;
}

static const u8 read_all_tx_array[] = {
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};

/**
 * lis3l02dq_read_all() Reads all channels currently selected
 * @st:         device specific state
 * @rx_array:   (dma capable) receive array, must be at least
 *              4 * (number of channels) bytes long
 **/
int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
{
        struct spi_transfer *xfers;
        struct spi_message msg;
        int ret, i, j = 0;

        xfers = kzalloc((st->indio_dev->scan_count) * 2
                        * sizeof(*xfers), GFP_KERNEL);
        if (!xfers)
                return -ENOMEM;

        mutex_lock(&st->buf_lock);

        for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++) {
                if (st->indio_dev->scan_mask & (1 << i)) {
                        /* lower byte */
                        xfers[j].tx_buf = st->tx + 2*j;
                        st->tx[2*j] = read_all_tx_array[i*4];
                        st->tx[2*j + 1] = 0;
                        if (rx_array)
                                xfers[j].rx_buf = rx_array + j*2;
                        xfers[j].bits_per_word = 8;
                        xfers[j].len = 2;
                        xfers[j].cs_change = 1;
                        j++;

                        /* upper byte */
                        xfers[j].tx_buf = st->tx + 2*j;
                        st->tx[2*j] = read_all_tx_array[i*4 + 2];
                        st->tx[2*j + 1] = 0;
                        if (rx_array)
                                xfers[j].rx_buf = rx_array + j*2;
                        xfers[j].bits_per_word = 8;
                        xfers[j].len = 2;
                        xfers[j].cs_change = 1;
                        j++;
                }
        }
        /* After these are transmitted, rx_array should have
         * values in alternate bytes
         */
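        /*
         * Editor's note on the layout (derived from the code above): each
         * enabled axis generates two 2-byte transfers, a read-command byte
         * followed by a dummy 0, so for the n-th enabled channel the
         * receive buffer holds:
         *   rx_array[4*n + 0]  byte clocked out with the OUT_x_L command
         *   rx_array[4*n + 1]  OUT_x_L register value
         *   rx_array[4*n + 2]  byte clocked out with the OUT_x_H command
         *   rx_array[4*n + 3]  OUT_x_H register value
         * which is why lis3l02dq_trigger_bh_to_ring() combines bytes
         * 4*n + 1 and 4*n + 3.
         */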
        spi_message_init(&msg);
        for (j = 0; j < st->indio_dev->scan_count * 2; j++)
                spi_message_add_tail(&xfers[j], &msg);

        ret = spi_sync(st->us, &msg);
        mutex_unlock(&st->buf_lock);
        kfree(xfers);

        return ret;
}

/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too
 * device specific to be rolled into the core.
 */
static void lis3l02dq_trigger_bh_to_ring(struct work_struct *work_s)
{
        struct lis3l02dq_state *st
                = container_of(work_s, struct lis3l02dq_state,
                               work_trigger_to_ring);

        u8 *rx_array;
        int i = 0;
        u16 *data;
        size_t datasize = st->indio_dev
                ->ring->access.get_bpd(st->indio_dev->ring);

        data = kmalloc(datasize, GFP_KERNEL);
        if (data == NULL) {
                dev_err(&st->us->dev, "memory alloc failed in ring bh");
                return;
        }
        /* Due to the interleaved nature of transmission this buffer must be
         * twice the number of bytes, or 4 times the number of channels
         */
        rx_array = kmalloc(4 * (st->indio_dev->scan_count), GFP_KERNEL);
        if (rx_array == NULL) {
                dev_err(&st->us->dev, "memory alloc failed in ring bh");
                kfree(data);
                return;
        }

        /* Whilst trigger specific, if this read does not occur the data
         * ready interrupt will not be cleared.  Need to add a mechanism
         * to provide a dummy read function if this is not triggering on
         * the data ready function but something else is.
         */
        st->inter = 0;

        if (st->indio_dev->scan_count)
                if (lis3l02dq_read_all(st, rx_array) >= 0)
                        for (; i < st->indio_dev->scan_count; i++)
                                data[i] = combine_8_to_16(rx_array[i*4+1],
                                                          rx_array[i*4+3]);
        /* Guaranteed to be aligned with 8 byte boundary */
        if (st->indio_dev->scan_timestamp)
                *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
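        /*
         * Worked example (editor's note): with the default X/Y/Z scan and
         * the timestamp enabled, i == 3 here, so ((i + 3)/4)*4 == 4 and the
         * timestamp is written at byte offset 8.  The sample pushed to the
         * ring is then 16 bytes: 3 x s16 of data, 2 bytes of padding and
         * the s64 timestamp, matching the 2 * sizeof(s64) chosen in
         * lis3l02dq_data_rdy_ring_preenable().
         */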

        st->indio_dev->ring->access.store_to(st->indio_dev->ring,
                                             (u8 *)data,
                                             st->last_timestamp);

        iio_trigger_notify_done(st->indio_dev->trig);
        kfree(rx_array);
        kfree(data);
}

/* In these circumstances is it better to go with unaligned packing and
 * deal with the cost?
 */
static int lis3l02dq_data_rdy_ring_preenable(struct iio_dev *indio_dev)
{
        size_t size;

        /* Check if there are any scan elements enabled; if not, fail */
        if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
                return -EINVAL;

        if (indio_dev->ring->access.set_bpd) {
                if (indio_dev->scan_timestamp) {
                        if (indio_dev->scan_count) /* Timestamp and data */
                                size = 2*sizeof(s64);
                        else /* Timestamp only */
                                size = sizeof(s64);
                } else { /* Data only */
                        size = indio_dev->scan_count*sizeof(s16);
                }
                indio_dev->ring->access.set_bpd(indio_dev->ring, size);
        }

        return 0;
}

static int lis3l02dq_data_rdy_ring_postenable(struct iio_dev *indio_dev)
{
        return indio_dev->trig
                ? iio_trigger_attach_poll_func(indio_dev->trig,
                                               indio_dev->pollfunc)
                : 0;
}

static int lis3l02dq_data_rdy_ring_predisable(struct iio_dev *indio_dev)
{
        return indio_dev->trig
                ? iio_trigger_dettach_poll_func(indio_dev->trig,
                                                indio_dev->pollfunc)
                : 0;
}

/* Caller responsible for locking as necessary. */
static int
__lis3l02dq_write_data_ready_config(struct device *dev,
                                    struct iio_event_handler_list *list,
                                    bool state)
{
        int ret;
        u8 valold;
        bool currentlyset;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        /* Get the current event mask register */
        ret = lis3l02dq_spi_read_reg_8(dev,
                                       LIS3L02DQ_REG_CTRL_2_ADDR,
                                       &valold);
        if (ret)
                goto error_ret;
        /* Find out if data ready is already on */
        currentlyset
                = valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

        /* Disable requested */
        if (!state && currentlyset) {
                valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
                /* The double write is to overcome a hardware bug? */
                ret = lis3l02dq_spi_write_reg_8(dev,
                                                LIS3L02DQ_REG_CTRL_2_ADDR,
                                                &valold);
                if (ret)
                        goto error_ret;
                ret = lis3l02dq_spi_write_reg_8(dev,
                                                LIS3L02DQ_REG_CTRL_2_ADDR,
                                                &valold);
                if (ret)
                        goto error_ret;

                iio_remove_event_from_list(list,
                                           &indio_dev->interrupts[0]
                                           ->ev_list);

        /* Enable requested */
        } else if (state && !currentlyset) {
                /* If not already set, enable it */
                valold |= LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
                iio_add_event_to_list(list, &indio_dev->interrupts[0]->ev_list);
                ret = lis3l02dq_spi_write_reg_8(dev,
                                                LIS3L02DQ_REG_CTRL_2_ADDR,
                                                &valold);
                if (ret)
                        goto error_ret;
        }

        return 0;
error_ret:
        return ret;
}

/**
 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
 *
 * If disabling the interrupt, also do a final read to ensure it is clear.
 * This is only important in some cases where the scan enable elements are
 * switched before the ring is re-enabled.
 **/
static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
                                                bool state)
{
        struct lis3l02dq_state *st = trig->private_data;
        int ret = 0;
        u8 t;

        ret = __lis3l02dq_write_data_ready_config(&st->indio_dev->dev,
                                                  &iio_event_data_rdy_trig,
                                                  state);
        if (ret)
                return ret;
        if (state == false) {
                /* possible quirk with handler currently worked around
                 * by ensuring the work queue is empty
                 */
                flush_scheduled_work();
                /* Clear any outstanding ready events */
                ret = lis3l02dq_read_all(st, NULL);
        }
        lis3l02dq_spi_read_reg_8(&st->indio_dev->dev,
                                 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
                                 &t);
        return ret;
}

static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *lis3l02dq_trigger_attrs[] = {
        &dev_attr_name.attr,
        NULL,
};

static const struct attribute_group lis3l02dq_trigger_attr_group = {
        .attrs = lis3l02dq_trigger_attrs,
};

/**
 * lis3l02dq_trig_try_reen() try re-enabling the irq for the data rdy trigger
 * @trig:       the datardy trigger
 *
 * As the trigger may occur on any data element being updated it is
 * really rather likely to occur during the read from the previous
 * trigger event.  The only way to discover whether this has occurred on
 * boards not supporting level interrupts is to take a look at the line.
 * If it is indicating another interrupt and we don't seem to have a
 * handler looking at it, then we need to tell the triggering core to
 * try reading all these again.
 **/
static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
{
        struct lis3l02dq_state *st = trig->private_data;

        enable_irq(st->us->irq);
        /* If gpio still high (or high again) */
        if (gpio_get_value(irq_to_gpio(st->us->irq))) {
                if (st->inter == 0) {
                        /* no interrupt handler is dealing with it yet */
                        disable_irq_nosync(st->us->irq);
                        if (st->inter == 1) {
                                /* interrupt handler snuck in between test
                                 * and disable
                                 */
                                enable_irq(st->us->irq);
                                return 0;
                        }
                        return -EAGAIN;
                }
        }
        /* irq reenabled so success! */
        return 0;
}
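
/*
 * Editor's note: st->inter is the handshake used by the functions above.
 * It is set to 1 in lis3l02dq_poll_func_th() once the trigger has fired,
 * cleared to 0 in lis3l02dq_trigger_bh_to_ring() just before the clearing
 * read, and inspected in lis3l02dq_trig_try_reen() to decide whether an
 * apparently pending interrupt is already being serviced.
 */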

int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
        int ret;
        struct lis3l02dq_state *state = indio_dev->dev_data;

        state->trig = iio_allocate_trigger();
        if (!state->trig)
                return -ENOMEM;

        state->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
        if (!state->trig->name) {
                ret = -ENOMEM;
                goto error_free_trig;
        }
        snprintf((char *)state->trig->name,
                 IIO_TRIGGER_NAME_LENGTH,
                 "lis3l02dq-dev%d", indio_dev->id);
        state->trig->dev.parent = &state->us->dev;
        state->trig->owner = THIS_MODULE;
        state->trig->private_data = state;
        state->trig->set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state;
        state->trig->try_reenable = &lis3l02dq_trig_try_reen;
        state->trig->control_attrs = &lis3l02dq_trigger_attr_group;
        ret = iio_trigger_register(state->trig);
        if (ret)
                goto error_free_trig_name;

        return 0;

error_free_trig_name:
        kfree(state->trig->name);
error_free_trig:
        iio_free_trigger(state->trig);

        return ret;
}

void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
{
        struct lis3l02dq_state *state = indio_dev->dev_data;

        iio_trigger_unregister(state->trig);
        kfree(state->trig->name);
        iio_free_trigger(state->trig);
}

void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
{
        kfree(indio_dev->pollfunc);
        iio_sw_rb_free(indio_dev->ring);
}

int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
{
        int ret = 0;
        struct lis3l02dq_state *st = indio_dev->dev_data;
        struct iio_ring_buffer *ring;

        INIT_WORK(&st->work_trigger_to_ring, lis3l02dq_trigger_bh_to_ring);

        /* Set default scan mode */
        iio_scan_mask_set(indio_dev, iio_scan_el_accel_x.number);
        iio_scan_mask_set(indio_dev, iio_scan_el_accel_y.number);
        iio_scan_mask_set(indio_dev, iio_scan_el_accel_z.number);
        indio_dev->scan_timestamp = true;

        indio_dev->scan_el_attrs = &lis3l02dq_scan_el_group;

        ring = iio_sw_rb_allocate(indio_dev);
        if (!ring) {
                ret = -ENOMEM;
                return ret;
        }
        indio_dev->ring = ring;
        /* Effectively select the ring buffer implementation */
        iio_ring_sw_register_funcs(&ring->access);
        ring->preenable = &lis3l02dq_data_rdy_ring_preenable;
        ring->postenable = &lis3l02dq_data_rdy_ring_postenable;
        ring->predisable = &lis3l02dq_data_rdy_ring_predisable;
        ring->owner = THIS_MODULE;

        indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
        if (indio_dev->pollfunc == NULL) {
                ret = -ENOMEM;
                goto error_iio_sw_rb_free;
        }
        indio_dev->pollfunc->poll_func_main = &lis3l02dq_poll_func_th;
        indio_dev->pollfunc->private_data = indio_dev;
        indio_dev->modes |= INDIO_RING_TRIGGERED;
        return 0;

error_iio_sw_rb_free:
        iio_sw_rb_free(indio_dev->ring);
        return ret;
}

int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring)
{
        return iio_ring_buffer_register(ring);
}

void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring)
{
        iio_ring_buffer_unregister(ring);
}

int lis3l02dq_set_ring_length(struct iio_dev *indio_dev, int length)
{
        /* Set a sensible default length for the ring buffer; note that the
         * length argument is currently not used and a fixed default of 500
         * is applied instead.
         */
        if (indio_dev->ring->access.set_length)
                return indio_dev->ring->access.set_length(indio_dev->ring, 500);
        return 0;
}