drivers/staging/iio/accel/lis3l02dq_ring.c
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "../iio.h"
#include "../sysfs.h"
#include "../ring_sw.h"
#include "accel.h"
#include "../trigger.h"
#include "lis3l02dq.h"

/**
 * combine_8_to_16() utility function to munge two u8s into a u16
 **/
static inline u16 combine_8_to_16(u8 lower, u8 upper)
{
        u16 _lower = lower;
        u16 _upper = upper;
        return _lower | (_upper << 8);
}
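/*
 * Illustrative note: the device supplies the low byte first, so, for
 * example, combine_8_to_16(0x34, 0x12) yields 0x1234.
 */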

/**
 * lis3l02dq_scan_el_set_state() set whether a scan contains a given channel
 * @scan_el:    associated iio scan element attribute
 * @indio_dev:  the device structure
 * @state:      desired state
 *
 * mlock already held when this is called.
 **/
static int lis3l02dq_scan_el_set_state(struct iio_scan_el *scan_el,
                                       struct iio_dev *indio_dev,
                                       bool state)
{
        u8 t, mask;
        int ret;

        ret = lis3l02dq_spi_read_reg_8(&indio_dev->dev,
                                       LIS3L02DQ_REG_CTRL_1_ADDR,
                                       &t);
        if (ret)
                goto error_ret;
        switch (scan_el->label) {
        case LIS3L02DQ_REG_OUT_X_L_ADDR:
                mask = LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
                break;
        case LIS3L02DQ_REG_OUT_Y_L_ADDR:
                mask = LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
                break;
        case LIS3L02DQ_REG_OUT_Z_L_ADDR:
                mask = LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
                break;
        default:
                ret = -EINVAL;
                goto error_ret;
        }

        /* Only write if the requested state differs from the current one */
        if (state != !!(t & mask)) {
                if (state)
                        t |= mask;
                else
                        t &= ~mask;
                ret = lis3l02dq_spi_write_reg_8(&indio_dev->dev,
                                                LIS3L02DQ_REG_CTRL_1_ADDR,
                                                &t);
        }
error_ret:
        return ret;
}
static IIO_SCAN_EL_C(accel_x, LIS3L02DQ_SCAN_ACC_X, IIO_SIGNED(16),
                     LIS3L02DQ_REG_OUT_X_L_ADDR,
                     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_C(accel_y, LIS3L02DQ_SCAN_ACC_Y, IIO_SIGNED(16),
                     LIS3L02DQ_REG_OUT_Y_L_ADDR,
                     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_C(accel_z, LIS3L02DQ_SCAN_ACC_Z, IIO_SIGNED(16),
                     LIS3L02DQ_REG_OUT_Z_L_ADDR,
                     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_TIMESTAMP;

static struct attribute *lis3l02dq_scan_el_attrs[] = {
        &iio_scan_el_accel_x.dev_attr.attr,
        &iio_scan_el_accel_y.dev_attr.attr,
        &iio_scan_el_accel_z.dev_attr.attr,
        &iio_scan_el_timestamp.dev_attr.attr,
        NULL,
};

static struct attribute_group lis3l02dq_scan_el_group = {
        .attrs = lis3l02dq_scan_el_attrs,
        .name = "scan_elements",
};

/**
 * lis3l02dq_poll_func_th() top half interrupt handler called by trigger
 * @private_data:       iio_dev
 **/
static void lis3l02dq_poll_func_th(struct iio_dev *indio_dev)
{
        struct lis3l02dq_state *st = iio_dev_get_devdata(indio_dev);

        st->last_timestamp = indio_dev->trig->timestamp;
        schedule_work(&st->work_trigger_to_ring);
        /* Indicate that this interrupt is being handled */

        /* Technically this is trigger related, but without this
         * handler running there is currently no way for the interrupt
         * to clear.
         */
        st->inter = 1;
}
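/*
 * Note: the heavy lifting (the SPI reads and the push into the ring
 * buffer) is deferred to lis3l02dq_trigger_bh_to_ring() via the work
 * item scheduled above.
 */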

/**
 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
 **/
static int lis3l02dq_data_rdy_trig_poll(struct iio_dev *dev_info,
                                        int index,
                                        s64 timestamp,
                                        int no_test)
{
        struct lis3l02dq_state *st = iio_dev_get_devdata(dev_info);
        struct iio_trigger *trig = st->trig;

        trig->timestamp = timestamp;
        iio_trigger_poll(trig);

        return IRQ_HANDLED;
}

/* This is an event as it is a response to a physical interrupt */
IIO_EVENT_SH(data_rdy_trig, &lis3l02dq_data_rdy_trig_poll);

/**
 * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
 **/
ssize_t lis3l02dq_read_accel_from_ring(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct iio_scan_el *el = NULL;
        int ret, len = 0, i = 0;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        struct iio_dev *dev_info = dev_get_drvdata(dev);
        s16 *data;

        while (dev_info->scan_el_attrs->attrs[i]) {
                el = to_iio_scan_el((struct device_attribute *)
                                    (dev_info->scan_el_attrs->attrs[i]));
                /* label is in fact the address */
                if (el->label == this_attr->address)
                        break;
                i++;
        }
        if (!dev_info->scan_el_attrs->attrs[i]) {
                ret = -EINVAL;
                goto error_ret;
        }
        /* If this element is in the scan mask */
        ret = iio_scan_mask_query(dev_info, el->number);
        if (ret < 0)
                goto error_ret;
        if (ret) {
                data = kmalloc(dev_info->ring->access.get_bpd(dev_info->ring),
                               GFP_KERNEL);
                if (data == NULL)
                        return -ENOMEM;
                ret = dev_info->ring->access.read_last(dev_info->ring,
                                                       (u8 *)data);
                if (ret)
                        goto error_free_data;
        } else {
                ret = -EINVAL;
                goto error_ret;
        }
        len = iio_scan_mask_count_to_right(dev_info, el->number);
        if (len < 0) {
                ret = len;
                goto error_free_data;
        }
        len = sprintf(buf, "ring %d\n", data[len]);
error_free_data:
        kfree(data);
error_ret:
        return ret ? ret : len;
}

static const u8 read_all_tx_array[] = {
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
        LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};
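/*
 * Each axis contributes four bytes to the array above: the read command
 * for the low data register, a padding byte clocked out while that byte
 * is returned, then the same pair for the high register.  This is why
 * lis3l02dq_read_all() steps through it in blocks of four (i*4, i*4 + 2).
 */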

/**
 * lis3l02dq_read_all() Reads all channels currently selected
 * @st:         device specific state
 * @rx_array:   (dma capable) receive array, must be at least
 *              4*number of channels
 **/
int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
{
        struct spi_transfer *xfers;
        struct spi_message msg;
        int ret, i, j = 0;

        xfers = kzalloc((st->indio_dev->scan_count) * 2
                        * sizeof(*xfers), GFP_KERNEL);
        if (!xfers)
                return -ENOMEM;

        mutex_lock(&st->buf_lock);

        for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++) {
                if (st->indio_dev->scan_mask & (1 << i)) {
                        /* lower byte */
                        xfers[j].tx_buf = st->tx + 2*j;
                        st->tx[2*j] = read_all_tx_array[i*4];
                        st->tx[2*j + 1] = 0;
                        if (rx_array)
                                xfers[j].rx_buf = rx_array + j*2;
                        xfers[j].bits_per_word = 8;
                        xfers[j].len = 2;
                        xfers[j].cs_change = 1;
                        j++;

                        /* upper byte */
                        xfers[j].tx_buf = st->tx + 2*j;
                        st->tx[2*j] = read_all_tx_array[i*4 + 2];
                        st->tx[2*j + 1] = 0;
                        if (rx_array)
                                xfers[j].rx_buf = rx_array + j*2;
                        xfers[j].bits_per_word = 8;
                        xfers[j].len = 2;
                        xfers[j].cs_change = 1;
                        j++;
                }
        }
        /* After these are transmitted, the rx_buff should have
         * values in alternate bytes
         */
        spi_message_init(&msg);
        for (j = 0; j < st->indio_dev->scan_count * 2; j++)
                spi_message_add_tail(&xfers[j], &msg);

        ret = spi_sync(st->us, &msg);
        mutex_unlock(&st->buf_lock);
        kfree(xfers);

        return ret;
}
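/*
 * After a successful lis3l02dq_read_all() the supplied rx_array holds,
 * per enabled channel, a group of four bytes: a junk byte clocked in
 * while the read command went out, the low data byte, another junk byte
 * and the high data byte.  The bottom half below therefore combines
 * rx_array[i*4 + 1] and rx_array[i*4 + 3] for each channel.
 */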

/* Whilst this makes a lot of calls to iio_sw_ring functions, it is too
 * device specific to be rolled into the core.
 */
static void lis3l02dq_trigger_bh_to_ring(struct work_struct *work_s)
{
        struct lis3l02dq_state *st
                = container_of(work_s, struct lis3l02dq_state,
                               work_trigger_to_ring);

        u8 *rx_array;
        int i = 0;
        u16 *data;
        size_t datasize = st->indio_dev
                ->ring->access.get_bpd(st->indio_dev->ring);

        data = kmalloc(datasize, GFP_KERNEL);
        if (data == NULL) {
                dev_err(&st->us->dev, "memory alloc failed in ring bh\n");
                return;
        }
        /* Due to the interleaved nature of transmission this buffer must be
         * twice the number of bytes, or 4 times the number of channels
         */
        rx_array = kmalloc(4 * (st->indio_dev->scan_count), GFP_KERNEL);
        if (rx_array == NULL) {
                dev_err(&st->us->dev, "memory alloc failed in ring bh\n");
                kfree(data);
                return;
        }

        /* Whilst trigger specific, if this read does not occur the data
         * ready interrupt will not be cleared.  Need to add a mechanism
         * to provide a dummy read function if this is not triggering on
         * the data ready function but something else is.
         */
        st->inter = 0;

        if (st->indio_dev->scan_count)
                if (lis3l02dq_read_all(st, rx_array) >= 0)
                        for (; i < st->indio_dev->scan_count; i++)
                                data[i] = combine_8_to_16(rx_array[i*4+1],
                                                          rx_array[i*4+3]);
        /* Guaranteed to be aligned with 8 byte boundary */
        if (st->indio_dev->scan_timestamp)
                *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;

        st->indio_dev->ring->access.store_to(st->indio_dev->ring,
                                             (u8 *)data,
                                             st->last_timestamp);

        iio_trigger_notify_done(st->indio_dev->trig);
        kfree(rx_array);
        kfree(data);
}
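/*
 * Each sample pushed above is therefore laid out as scan_count s16
 * readings followed, at the next 8 byte boundary, by the s64 timestamp
 * (when timestamping is enabled).
 */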
/* In these circumstances, is it better to go with unaligned packing and
 * deal with the cost?
 */
static int lis3l02dq_data_rdy_ring_preenable(struct iio_dev *indio_dev)
{
        size_t size;
        /* Check if there are any scan elements enabled, if not fail */
        if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
                return -EINVAL;

        if (indio_dev->ring->access.set_bpd) {
                if (indio_dev->scan_timestamp) {
                        if (indio_dev->scan_count) /* Timestamp and data */
                                size = 2*sizeof(s64);
                        else /* Timestamp only */
                                size = sizeof(s64);
                } else { /* Data only */
                        size = indio_dev->scan_count*sizeof(s16);
                }
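                /*
                 * e.g. with the default X, Y and Z scan plus timestamp this
                 * requests 2*sizeof(s64) = 16 bytes per sample: three s16
                 * readings padded to an 8 byte boundary plus the timestamp.
                 */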
                indio_dev->ring->access.set_bpd(indio_dev->ring, size);
        }

        return 0;
}

static int lis3l02dq_data_rdy_ring_postenable(struct iio_dev *indio_dev)
{
        return indio_dev->trig
                ? iio_trigger_attach_poll_func(indio_dev->trig,
                                               indio_dev->pollfunc)
                : 0;
}

static int lis3l02dq_data_rdy_ring_predisable(struct iio_dev *indio_dev)
{
        return indio_dev->trig
                ? iio_trigger_dettach_poll_func(indio_dev->trig,
                                                indio_dev->pollfunc)
                : 0;
}

/* Caller responsible for locking as necessary. */
static int __lis3l02dq_write_data_ready_config(struct device *dev,
                                               struct
                                               iio_event_handler_list *list,
                                               bool state)
{
        int ret;
        u8 valold;
        bool currentlyset;
        struct iio_dev *indio_dev = dev_get_drvdata(dev);

        /* Get the current event mask register */
        ret = lis3l02dq_spi_read_reg_8(dev,
                                       LIS3L02DQ_REG_CTRL_2_ADDR,
                                       &valold);
        if (ret)
                goto error_ret;
        /* Find out if data ready is already on */
        currentlyset
                = valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

        /* Disable requested */
        if (!state && currentlyset) {
                valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
                /* The double write is to overcome a hardware bug? */
                ret = lis3l02dq_spi_write_reg_8(dev,
                                                LIS3L02DQ_REG_CTRL_2_ADDR,
                                                &valold);
                if (ret)
                        goto error_ret;
                ret = lis3l02dq_spi_write_reg_8(dev,
                                                LIS3L02DQ_REG_CTRL_2_ADDR,
                                                &valold);
                if (ret)
                        goto error_ret;

                iio_remove_event_from_list(list,
                                           &indio_dev->interrupts[0]
                                           ->ev_list);

        /* Enable requested */
        } else if (state && !currentlyset) {
                /* If not set, enable requested */
                valold |= LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
                iio_add_event_to_list(list, &indio_dev->interrupts[0]->ev_list);
                ret = lis3l02dq_spi_write_reg_8(dev,
                                                LIS3L02DQ_REG_CTRL_2_ADDR,
                                                &valold);
                if (ret)
                        goto error_ret;
        }

        return 0;
error_ret:
        return ret;
}

/**
 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
 *
 * When disabling the interrupt, also do a final read to ensure it is clear.
 * This is only important in some cases where the scan enable elements are
 * switched before the ring is reenabled.
 **/
static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
                                                bool state)
{
        struct lis3l02dq_state *st = trig->private_data;
        int ret = 0;
        u8 t;

        __lis3l02dq_write_data_ready_config(&st->indio_dev->dev,
                                            &iio_event_data_rdy_trig,
                                            state);
        if (state == false) {
                /* possible quirk with handler currently worked around
                 * by ensuring the work queue is empty */
                flush_scheduled_work();
                /* Clear any outstanding ready events */
                ret = lis3l02dq_read_all(st, NULL);
        }
        lis3l02dq_spi_read_reg_8(&st->indio_dev->dev,
                                 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
                                 &t);
        return ret;
}
static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *lis3l02dq_trigger_attrs[] = {
        &dev_attr_name.attr,
        NULL,
};

static const struct attribute_group lis3l02dq_trigger_attr_group = {
        .attrs = lis3l02dq_trigger_attrs,
};

/**
 * lis3l02dq_trig_try_reen() try re-enabling irq for data rdy trigger
 * @trig:       the datardy trigger
 *
 * As the trigger may occur on any data element being updated it is
 * really rather likely to occur during the read from the previous
 * trigger event.  The only way to discover if this has occurred on
 * boards not supporting level interrupts is to take a look at the line.
 * If it is indicating another interrupt and we don't seem to have a
 * handler looking at it, then we need to ask the trigger core to try
 * reading all these devices again.
 **/
static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
{
        struct lis3l02dq_state *st = trig->private_data;

        enable_irq(st->us->irq);
        /* If gpio still high (or high again) */
        if (gpio_get_value(irq_to_gpio(st->us->irq)))
                if (st->inter == 0) {
                        /* no handler currently dealing with it, so disable
                         * the irq and ask the core to retry */
                        disable_irq_nosync(st->us->irq);
                        if (st->inter == 1) {
                                /* interrupt handler snuck in between test
                                 * and disable */
                                enable_irq(st->us->irq);
                                return 0;
                        }
                        return -EAGAIN;
                }
        /* irq reenabled so success! */
        return 0;
}

int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
        int ret;
        struct lis3l02dq_state *state = indio_dev->dev_data;

        state->trig = iio_allocate_trigger();
        if (!state->trig)
                return -ENOMEM;

        state->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
        if (!state->trig->name) {
                ret = -ENOMEM;
                goto error_free_trig;
        }
        snprintf((char *)state->trig->name,
                 IIO_TRIGGER_NAME_LENGTH,
                 "lis3l02dq-dev%d", indio_dev->id);
        state->trig->dev.parent = &state->us->dev;
        state->trig->owner = THIS_MODULE;
        state->trig->private_data = state;
        state->trig->set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state;
        state->trig->try_reenable = &lis3l02dq_trig_try_reen;
        state->trig->control_attrs = &lis3l02dq_trigger_attr_group;
        ret = iio_trigger_register(state->trig);
        if (ret)
                goto error_free_trig_name;

        return 0;

error_free_trig_name:
        kfree(state->trig->name);
error_free_trig:
        iio_free_trigger(state->trig);

        return ret;
}

void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
{
        struct lis3l02dq_state *state = indio_dev->dev_data;

        iio_trigger_unregister(state->trig);
        kfree(state->trig->name);
        iio_free_trigger(state->trig);
}

void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
{
        kfree(indio_dev->pollfunc);
        iio_sw_rb_free(indio_dev->ring);
}

int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
{
        int ret = 0;
        struct lis3l02dq_state *st = indio_dev->dev_data;
        struct iio_ring_buffer *ring;

        INIT_WORK(&st->work_trigger_to_ring, lis3l02dq_trigger_bh_to_ring);

        /* Set default scan mode */
        iio_scan_mask_set(indio_dev, iio_scan_el_accel_x.number);
        iio_scan_mask_set(indio_dev, iio_scan_el_accel_y.number);
        iio_scan_mask_set(indio_dev, iio_scan_el_accel_z.number);
        indio_dev->scan_timestamp = true;

        indio_dev->scan_el_attrs = &lis3l02dq_scan_el_group;

        ring = iio_sw_rb_allocate(indio_dev);
        if (!ring) {
                ret = -ENOMEM;
                return ret;
        }
        indio_dev->ring = ring;
        /* Effectively select the ring buffer implementation */
        iio_ring_sw_register_funcs(&ring->access);
        ring->preenable = &lis3l02dq_data_rdy_ring_preenable;
        ring->postenable = &lis3l02dq_data_rdy_ring_postenable;
        ring->predisable = &lis3l02dq_data_rdy_ring_predisable;
        ring->owner = THIS_MODULE;

        indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
        if (indio_dev->pollfunc == NULL) {
                ret = -ENOMEM;
                goto error_iio_sw_rb_free;
        }
        indio_dev->pollfunc->poll_func_main = &lis3l02dq_poll_func_th;
        indio_dev->pollfunc->private_data = indio_dev;
        indio_dev->modes |= INDIO_RING_TRIGGERED;
        return 0;

error_iio_sw_rb_free:
        iio_sw_rb_free(indio_dev->ring);
        return ret;
}

int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring)
{
        return iio_ring_buffer_register(ring);
}

void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring)
{
        iio_ring_buffer_unregister(ring);
}

int lis3l02dq_set_ring_length(struct iio_dev *indio_dev, int length)
{
        /* Only set the length if the ring implementation supports it */
        if (indio_dev->ring->access.set_length)
                return indio_dev->ring->access.set_length(indio_dev->ring,
                                                          length);
        return 0;
}