]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/staging/most/mostcore/core.c
Merge tag 'regulator-fix-v4.10-rc6' of git://git.kernel.org/pub/scm/linux/kernel...
[karo-tx-linux.git] / drivers / staging / most / mostcore / core.c
1 /*
2  * core.c - Implementation of core module of MOST Linux driver stack
3  *
4  * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
9  * GNU General Public License for more details.
10  *
11  * This file is licensed under GPLv2.
12  */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
16 #include <linux/fs.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/device.h>
20 #include <linux/list.h>
21 #include <linux/poll.h>
22 #include <linux/wait.h>
23 #include <linux/kobject.h>
24 #include <linux/mutex.h>
25 #include <linux/completion.h>
26 #include <linux/sysfs.h>
27 #include <linux/kthread.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/idr.h>
30 #include "mostcore.h"
31
32 #define MAX_CHANNELS    64
33 #define STRING_SIZE     80
34
35 static struct class *most_class;
36 static struct device *core_dev;
37 static struct ida mdev_id;
38 static int dummy_num_buffers;
39
/*
 * struct most_c_aim_obj - per-channel state for one connected AIM
 * @ptr: the AIM driver linked to this channel (NULL when unlinked)
 * @refs: usage count for this link -- presumably taken/dropped on
 *        connect/disconnect; confirm against the link/unlink paths
 * @num_buffers: number of buffers assigned to this AIM's side of the channel
 */
struct most_c_aim_obj {
        struct most_aim *ptr;
        int refs;
        int num_buffers;
};
45
/*
 * struct most_c_obj - state of a single MOST channel
 * @kobj: sysfs representation of the channel
 * @cleanup: completed when the last MBO reference is dropped
 *           (see most_free_mbo_coherent)
 * @mbo_ref: outstanding MBO references for this channel
 * @mbo_nq_level: fill level of not-yet-queued MBOs
 * @channel_id: index into the interface's channel_vector
 * @is_poisoned: channel has been poisoned and must not be used
 * @start_mutex: serializes channel start/stop
 * @nq_mutex: nq thread synchronization
 * @is_starving: flag exported via the channel_starving sysfs attribute
 * @iface: hardware interface this channel belongs to
 * @inst: owning instance object
 * @cfg: user-configurable channel parameters (sysfs set_* attributes)
 * @fifo: MBOs ready for use, protected by @fifo_lock
 * @halt_fifo: MBOs parked while the channel is halted, same lock
 * @trash_fifo: MBOs pending release (drained by flush_trash_fifo)
 * @aim0, @aim1: up to two AIMs may be linked to one channel
 * @hdm_enqueue_task: kthread feeding MBOs to the HDM
 * @hdm_fifo_wq: wait queue for the enqueue task
 */
struct most_c_obj {
        struct kobject kobj;
        struct completion cleanup;
        atomic_t mbo_ref;
        atomic_t mbo_nq_level;
        u16 channel_id;
        bool is_poisoned;
        struct mutex start_mutex;
        struct mutex nq_mutex; /* nq thread synchronization */
        int is_starving;
        struct most_interface *iface;
        struct most_inst_obj *inst;
        struct most_channel_config cfg;
        bool keep_mbo;
        bool enqueue_halt;
        struct list_head fifo;
        spinlock_t fifo_lock;
        struct list_head halt_fifo;
        struct list_head list;
        struct most_c_aim_obj aim0;
        struct most_c_aim_obj aim1;
        struct list_head trash_fifo;
        struct task_struct *hdm_enqueue_task;
        wait_queue_head_t hdm_fifo_wq;
};
71
72 #define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
73
/*
 * struct most_inst_obj - one registered MOST interface instance
 * @dev_id: id allocated from the mdev_id ida
 * @iface: the hardware interface this instance wraps
 * @channel_list: list of this instance's channels (struct most_c_obj.list)
 * @channel: direct lookup table, indexed by channel id
 * @kobj: sysfs representation of the instance
 * @list: link in the global instance_list
 */
struct most_inst_obj {
        int dev_id;
        struct most_interface *iface;
        struct list_head channel_list;
        struct most_c_obj *channel[MAX_CHANNELS];
        struct kobject kobj;
        struct list_head list;
};
82
/*
 * Mapping between channel data-type flags and the strings used over sysfs.
 * The trailing '\n' is deliberate: it lets the table entries compare equal
 * to raw sysfs input in store_set_datatype() and print with a newline in
 * show_set_datatype().  "isoc" and "isoc_avp" are aliases for MOST_CH_ISOC.
 */
static const struct {
        int most_ch_data_type;
        char *name;
} ch_data_type[] = {
        { MOST_CH_CONTROL, "control\n" },
        { MOST_CH_ASYNC, "async\n" },
        { MOST_CH_SYNC, "sync\n" },
        { MOST_CH_ISOC, "isoc\n"},
        { MOST_CH_ISOC, "isoc_avp\n"},
};
93
94 #define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
95
/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 *
 * NOTE: the caller must hold the lock protecting the list and must not
 * pass an empty list (list_first_entry on an empty list is undefined).
 */
#define list_pop_mbo(ptr)                                               \
({                                                                      \
        struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);     \
        list_del(&_mbo->list);                                          \
        _mbo;                                                           \
})
106
107 /*                   ___             ___
108  *                   ___C H A N N E L___
109  */
110
/**
 * struct most_c_attr - to access the attributes of a channel object
 * @attr: attributes of a channel
 * @show: pointer to the show function (may be NULL for write-only attrs)
 * @store: pointer to the store function (may be NULL for read-only attrs)
 *
 * Dispatched by channel_attr_show()/channel_attr_store() below.
 */
struct most_c_attr {
        struct attribute attr;
        ssize_t (*show)(struct most_c_obj *d,
                        struct most_c_attr *attr,
                        char *buf);
        ssize_t (*store)(struct most_c_obj *d,
                         struct most_c_attr *attr,
                         const char *buf,
                         size_t count);
};
127
128 #define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
129
/* Declares a struct most_c_attr named most_chnl_attr_<_name>, initialized
 * via __ATTR with the given mode and show/store callbacks.
 */
#define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
                struct most_c_attr most_chnl_attr_##_name = \
                __ATTR(_name, _mode, _show, _store)
133
134 /**
135  * channel_attr_show - show function of channel object
136  * @kobj: pointer to its kobject
137  * @attr: pointer to its attributes
138  * @buf: buffer
139  */
140 static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
141                                  char *buf)
142 {
143         struct most_c_attr *channel_attr = to_channel_attr(attr);
144         struct most_c_obj *c_obj = to_c_obj(kobj);
145
146         if (!channel_attr->show)
147                 return -EIO;
148
149         return channel_attr->show(c_obj, channel_attr, buf);
150 }
151
152 /**
153  * channel_attr_store - store function of channel object
154  * @kobj: pointer to its kobject
155  * @attr: pointer to its attributes
156  * @buf: buffer
157  * @len: length of buffer
158  */
159 static ssize_t channel_attr_store(struct kobject *kobj,
160                                   struct attribute *attr,
161                                   const char *buf,
162                                   size_t len)
163 {
164         struct most_c_attr *channel_attr = to_channel_attr(attr);
165         struct most_c_obj *c_obj = to_c_obj(kobj);
166
167         if (!channel_attr->store)
168                 return -EIO;
169         return channel_attr->store(c_obj, channel_attr, buf, len);
170 }
171
/* sysfs operations for channel objects; wired into most_channel_ktype */
static const struct sysfs_ops most_channel_sysfs_ops = {
        .show = channel_attr_show,
        .store = channel_attr_store,
};
176
177 /**
178  * most_free_mbo_coherent - free an MBO and its coherent buffer
179  * @mbo: buffer to be released
180  *
181  */
182 static void most_free_mbo_coherent(struct mbo *mbo)
183 {
184         struct most_c_obj *c = mbo->context;
185         u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
186
187         dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
188                           mbo->bus_address);
189         kfree(mbo);
190         if (atomic_sub_and_test(1, &c->mbo_ref))
191                 complete(&c->cleanup);
192 }
193
/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 *
 * Frees every MBO queued on the channel's fifo and halt_fifo.  The fifo
 * lock is dropped around each most_free_mbo_coherent() call because that
 * function may sleep (dma_free_coherent) and may complete c->cleanup.
 *
 * NOTE(review): list_for_each_entry_safe caches the next element in 'tmp';
 * dropping the lock mid-iteration assumes no other context removes that
 * cached element concurrently -- presumably the channel is already shut
 * down when this runs; confirm against the callers.
 */
static void flush_channel_fifos(struct most_c_obj *c)
{
        unsigned long flags, hf_flags;
        struct mbo *mbo, *tmp;

        if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
                return;

        /* drain the regular fifo */
        spin_lock_irqsave(&c->fifo_lock, flags);
        list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
                list_del(&mbo->list);
                spin_unlock_irqrestore(&c->fifo_lock, flags);
                most_free_mbo_coherent(mbo);
                spin_lock_irqsave(&c->fifo_lock, flags);
        }
        spin_unlock_irqrestore(&c->fifo_lock, flags);

        /* drain the halt fifo the same way */
        spin_lock_irqsave(&c->fifo_lock, hf_flags);
        list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
                list_del(&mbo->list);
                spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
                most_free_mbo_coherent(mbo);
                spin_lock_irqsave(&c->fifo_lock, hf_flags);
        }
        spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

        if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
                pr_info("WARN: fifo | trash fifo not empty\n");
}
227
/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 *
 * Frees every MBO parked on the trash fifo.  As in flush_channel_fifos(),
 * the lock is dropped around most_free_mbo_coherent() because it may
 * sleep; the same caveat about the cached next pointer applies.
 *
 * Always returns 0.
 */
static int flush_trash_fifo(struct most_c_obj *c)
{
        struct mbo *mbo, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&c->fifo_lock, flags);
        list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
                list_del(&mbo->list);
                spin_unlock_irqrestore(&c->fifo_lock, flags);
                most_free_mbo_coherent(mbo);
                spin_lock_irqsave(&c->fifo_lock, flags);
        }
        spin_unlock_irqrestore(&c->fifo_lock, flags);
        return 0;
}
247
/**
 * most_channel_release - release function of channel object
 * @kobj: pointer to channel's kobject
 *
 * Called by the kobject core once the last reference is gone; frees the
 * embedding channel object.
 */
static void most_channel_release(struct kobject *kobj)
{
        kfree(to_c_obj(kobj));
}
258
259 static ssize_t show_available_directions(struct most_c_obj *c,
260                                          struct most_c_attr *attr,
261                                          char *buf)
262 {
263         unsigned int i = c->channel_id;
264
265         strcpy(buf, "");
266         if (c->iface->channel_vector[i].direction & MOST_CH_RX)
267                 strcat(buf, "rx ");
268         if (c->iface->channel_vector[i].direction & MOST_CH_TX)
269                 strcat(buf, "tx ");
270         strcat(buf, "\n");
271         return strlen(buf);
272 }
273
274 static ssize_t show_available_datatypes(struct most_c_obj *c,
275                                         struct most_c_attr *attr,
276                                         char *buf)
277 {
278         unsigned int i = c->channel_id;
279
280         strcpy(buf, "");
281         if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
282                 strcat(buf, "control ");
283         if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
284                 strcat(buf, "async ");
285         if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
286                 strcat(buf, "sync ");
287         if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
288                 strcat(buf, "isoc ");
289         strcat(buf, "\n");
290         return strlen(buf);
291 }
292
293 static
294 ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
295                                       struct most_c_attr *attr,
296                                       char *buf)
297 {
298         unsigned int i = c->channel_id;
299
300         return snprintf(buf, PAGE_SIZE, "%d\n",
301                         c->iface->channel_vector[i].num_buffers_packet);
302 }
303
304 static
305 ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
306                                       struct most_c_attr *attr,
307                                       char *buf)
308 {
309         unsigned int i = c->channel_id;
310
311         return snprintf(buf, PAGE_SIZE, "%d\n",
312                         c->iface->channel_vector[i].num_buffers_streaming);
313 }
314
315 static
316 ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
317                                    struct most_c_attr *attr,
318                                    char *buf)
319 {
320         unsigned int i = c->channel_id;
321
322         return snprintf(buf, PAGE_SIZE, "%d\n",
323                         c->iface->channel_vector[i].buffer_size_packet);
324 }
325
326 static
327 ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
328                                    struct most_c_attr *attr,
329                                    char *buf)
330 {
331         unsigned int i = c->channel_id;
332
333         return snprintf(buf, PAGE_SIZE, "%d\n",
334                         c->iface->channel_vector[i].buffer_size_streaming);
335 }
336
337 static ssize_t show_channel_starving(struct most_c_obj *c,
338                                      struct most_c_attr *attr,
339                                      char *buf)
340 {
341         return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
342 }
343
/* Shorthand to declare a read-only (0444) channel attribute backed by a
 * show_<val>() function.
 */
#define create_show_channel_attribute(val) \
        static MOST_CHNL_ATTR(val, 0444, show_##val, NULL)

create_show_channel_attribute(available_directions);
create_show_channel_attribute(available_datatypes);
create_show_channel_attribute(number_of_packet_buffers);
create_show_channel_attribute(number_of_stream_buffers);
create_show_channel_attribute(size_of_stream_buffer);
create_show_channel_attribute(size_of_packet_buffer);
create_show_channel_attribute(channel_starving);
354
355 static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
356                                           struct most_c_attr *attr,
357                                           char *buf)
358 {
359         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
360 }
361
362 static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
363                                            struct most_c_attr *attr,
364                                            const char *buf,
365                                            size_t count)
366 {
367         int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
368
369         if (ret)
370                 return ret;
371         return count;
372 }
373
374 static ssize_t show_set_buffer_size(struct most_c_obj *c,
375                                     struct most_c_attr *attr,
376                                     char *buf)
377 {
378         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
379 }
380
381 static ssize_t store_set_buffer_size(struct most_c_obj *c,
382                                      struct most_c_attr *attr,
383                                      const char *buf,
384                                      size_t count)
385 {
386         int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
387
388         if (ret)
389                 return ret;
390         return count;
391 }
392
393 static ssize_t show_set_direction(struct most_c_obj *c,
394                                   struct most_c_attr *attr,
395                                   char *buf)
396 {
397         if (c->cfg.direction & MOST_CH_TX)
398                 return snprintf(buf, PAGE_SIZE, "tx\n");
399         else if (c->cfg.direction & MOST_CH_RX)
400                 return snprintf(buf, PAGE_SIZE, "rx\n");
401         return snprintf(buf, PAGE_SIZE, "unconfigured\n");
402 }
403
404 static ssize_t store_set_direction(struct most_c_obj *c,
405                                    struct most_c_attr *attr,
406                                    const char *buf,
407                                    size_t count)
408 {
409         if (!strcmp(buf, "dir_rx\n")) {
410                 c->cfg.direction = MOST_CH_RX;
411         } else if (!strcmp(buf, "rx\n")) {
412                 c->cfg.direction = MOST_CH_RX;
413         } else if (!strcmp(buf, "dir_tx\n")) {
414                 c->cfg.direction = MOST_CH_TX;
415         } else if (!strcmp(buf, "tx\n")) {
416                 c->cfg.direction = MOST_CH_TX;
417         } else {
418                 pr_info("WARN: invalid attribute settings\n");
419                 return -EINVAL;
420         }
421         return count;
422 }
423
424 static ssize_t show_set_datatype(struct most_c_obj *c,
425                                  struct most_c_attr *attr,
426                                  char *buf)
427 {
428         int i;
429
430         for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
431                 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
432                         return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
433         }
434         return snprintf(buf, PAGE_SIZE, "unconfigured\n");
435 }
436
437 static ssize_t store_set_datatype(struct most_c_obj *c,
438                                   struct most_c_attr *attr,
439                                   const char *buf,
440                                   size_t count)
441 {
442         int i;
443
444         for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
445                 if (!strcmp(buf, ch_data_type[i].name)) {
446                         c->cfg.data_type = ch_data_type[i].most_ch_data_type;
447                         break;
448                 }
449         }
450
451         if (i == ARRAY_SIZE(ch_data_type)) {
452                 pr_info("WARN: invalid attribute settings\n");
453                 return -EINVAL;
454         }
455         return count;
456 }
457
458 static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
459                                        struct most_c_attr *attr,
460                                        char *buf)
461 {
462         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
463 }
464
465 static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
466                                         struct most_c_attr *attr,
467                                         const char *buf,
468                                         size_t count)
469 {
470         int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
471
472         if (ret)
473                 return ret;
474         return count;
475 }
476
477 static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
478                                          struct most_c_attr *attr,
479                                          char *buf)
480 {
481         return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
482 }
483
484 static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
485                                           struct most_c_attr *attr,
486                                           const char *buf,
487                                           size_t count)
488 {
489         int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
490
491         if (ret)
492                 return ret;
493         return count;
494 }
495
/* Shorthand to declare a read-write (0644) channel attribute backed by
 * show_<value>() and store_<value>() functions.
 */
#define create_channel_attribute(value) \
        static MOST_CHNL_ATTR(value, 0644, show_##value, store_##value)

create_channel_attribute(set_buffer_size);
create_channel_attribute(set_number_of_buffers);
create_channel_attribute(set_direction);
create_channel_attribute(set_datatype);
create_channel_attribute(set_subbuffer_size);
create_channel_attribute(set_packets_per_xact);
505
/**
 * most_channel_def_attrs - array of default attributes of channel object
 *
 * Installed automatically for every channel via most_channel_ktype;
 * must stay NULL-terminated.
 */
static struct attribute *most_channel_def_attrs[] = {
        &most_chnl_attr_available_directions.attr,
        &most_chnl_attr_available_datatypes.attr,
        &most_chnl_attr_number_of_packet_buffers.attr,
        &most_chnl_attr_number_of_stream_buffers.attr,
        &most_chnl_attr_size_of_packet_buffer.attr,
        &most_chnl_attr_size_of_stream_buffer.attr,
        &most_chnl_attr_set_number_of_buffers.attr,
        &most_chnl_attr_set_buffer_size.attr,
        &most_chnl_attr_set_direction.attr,
        &most_chnl_attr_set_datatype.attr,
        &most_chnl_attr_set_subbuffer_size.attr,
        &most_chnl_attr_set_packets_per_xact.attr,
        &most_chnl_attr_channel_starving.attr,
        NULL,
};
525
/* kobject type for channel objects: sysfs dispatch, release and
 * default attributes
 */
static struct kobj_type most_channel_ktype = {
        .sysfs_ops = &most_channel_sysfs_ops,
        .release = most_channel_release,
        .default_attrs = most_channel_def_attrs,
};
531
532 static struct kset *most_channel_kset;
533
534 /**
535  * create_most_c_obj - allocates a channel object
536  * @name: name of the channel object
537  * @parent: parent kobject
538  *
539  * This create a channel object and registers it with sysfs.
540  * Returns a pointer to the object or NULL when something went wrong.
541  */
542 static struct most_c_obj *
543 create_most_c_obj(const char *name, struct kobject *parent)
544 {
545         struct most_c_obj *c;
546         int retval;
547
548         c = kzalloc(sizeof(*c), GFP_KERNEL);
549         if (!c)
550                 return NULL;
551         c->kobj.kset = most_channel_kset;
552         retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
553                                       "%s", name);
554         if (retval) {
555                 kobject_put(&c->kobj);
556                 return NULL;
557         }
558         kobject_uevent(&c->kobj, KOBJ_ADD);
559         return c;
560 }
561
562 /*                   ___               ___
563  *                   ___I N S T A N C E___
564  */
/* Declares a struct most_inst_attribute named most_inst_attr_<_name>,
 * initialized via __ATTR with the given mode and show/store callbacks.
 */
#define MOST_INST_ATTR(_name, _mode, _show, _store) \
                struct most_inst_attribute most_inst_attr_##_name = \
                __ATTR(_name, _mode, _show, _store)
568
569 static struct list_head instance_list;
570
/**
 * struct most_inst_attribute - to access the attributes of instance object
 * @attr: attributes of an instance
 * @show: pointer to the show function (may be NULL for write-only attrs)
 * @store: pointer to the store function (may be NULL for read-only attrs)
 *
 * Dispatched by instance_attr_show()/instance_attr_store() below.
 */
struct most_inst_attribute {
        struct attribute attr;
        ssize_t (*show)(struct most_inst_obj *d,
                        struct most_inst_attribute *attr,
                        char *buf);
        ssize_t (*store)(struct most_inst_obj *d,
                         struct most_inst_attribute *attr,
                         const char *buf,
                         size_t count);
};
587
588 #define to_instance_attr(a) \
589         container_of(a, struct most_inst_attribute, attr)
590
591 /**
592  * instance_attr_show - show function for an instance object
593  * @kobj: pointer to kobject
594  * @attr: pointer to attribute struct
595  * @buf: buffer
596  */
597 static ssize_t instance_attr_show(struct kobject *kobj,
598                                   struct attribute *attr,
599                                   char *buf)
600 {
601         struct most_inst_attribute *instance_attr;
602         struct most_inst_obj *instance_obj;
603
604         instance_attr = to_instance_attr(attr);
605         instance_obj = to_inst_obj(kobj);
606
607         if (!instance_attr->show)
608                 return -EIO;
609
610         return instance_attr->show(instance_obj, instance_attr, buf);
611 }
612
613 /**
614  * instance_attr_store - store function for an instance object
615  * @kobj: pointer to kobject
616  * @attr: pointer to attribute struct
617  * @buf: buffer
618  * @len: length of buffer
619  */
620 static ssize_t instance_attr_store(struct kobject *kobj,
621                                    struct attribute *attr,
622                                    const char *buf,
623                                    size_t len)
624 {
625         struct most_inst_attribute *instance_attr;
626         struct most_inst_obj *instance_obj;
627
628         instance_attr = to_instance_attr(attr);
629         instance_obj = to_inst_obj(kobj);
630
631         if (!instance_attr->store)
632                 return -EIO;
633
634         return instance_attr->store(instance_obj, instance_attr, buf, len);
635 }
636
/* sysfs operations for instance objects; wired into most_inst_ktype */
static const struct sysfs_ops most_inst_sysfs_ops = {
        .show = instance_attr_show,
        .store = instance_attr_store,
};
641
/**
 * most_inst_release - release function for instance object
 * @kobj: pointer to instance's kobject
 *
 * Called by the kobject core once the last reference is gone; frees the
 * embedding instance object.
 */
static void most_inst_release(struct kobject *kobj)
{
        kfree(to_inst_obj(kobj));
}
654
655 static ssize_t show_description(struct most_inst_obj *instance_obj,
656                                 struct most_inst_attribute *attr,
657                                 char *buf)
658 {
659         return snprintf(buf, PAGE_SIZE, "%s\n",
660                         instance_obj->iface->description);
661 }
662
663 static ssize_t show_interface(struct most_inst_obj *instance_obj,
664                               struct most_inst_attribute *attr,
665                               char *buf)
666 {
667         switch (instance_obj->iface->interface) {
668         case ITYPE_LOOPBACK:
669                 return snprintf(buf, PAGE_SIZE, "loopback\n");
670         case ITYPE_I2C:
671                 return snprintf(buf, PAGE_SIZE, "i2c\n");
672         case ITYPE_I2S:
673                 return snprintf(buf, PAGE_SIZE, "i2s\n");
674         case ITYPE_TSI:
675                 return snprintf(buf, PAGE_SIZE, "tsi\n");
676         case ITYPE_HBI:
677                 return snprintf(buf, PAGE_SIZE, "hbi\n");
678         case ITYPE_MEDIALB_DIM:
679                 return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
680         case ITYPE_MEDIALB_DIM2:
681                 return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
682         case ITYPE_USB:
683                 return snprintf(buf, PAGE_SIZE, "usb\n");
684         case ITYPE_PCIE:
685                 return snprintf(buf, PAGE_SIZE, "pcie\n");
686         }
687         return snprintf(buf, PAGE_SIZE, "unknown\n");
688 }
689
/* Shorthand to declare a read-only (0444) instance attribute backed by a
 * show_<value>() function.
 */
#define create_inst_attribute(value) \
        static MOST_INST_ATTR(value, 0444, show_##value, NULL)

create_inst_attribute(description);
create_inst_attribute(interface);
695
/* Default sysfs attributes of every instance; must stay NULL-terminated */
static struct attribute *most_inst_def_attrs[] = {
        &most_inst_attr_description.attr,
        &most_inst_attr_interface.attr,
        NULL,
};
701
/* kobject type for instance objects: sysfs dispatch, release and
 * default attributes
 */
static struct kobj_type most_inst_ktype = {
        .sysfs_ops = &most_inst_sysfs_ops,
        .release = most_inst_release,
        .default_attrs = most_inst_def_attrs,
};
707
708 static struct kset *most_inst_kset;
709
710 /**
711  * create_most_inst_obj - creates an instance object
712  * @name: name of the object to be created
713  *
714  * This allocates memory for an instance structure, assigns the proper kset
715  * and registers it with sysfs.
716  *
717  * Returns a pointer to the instance object or NULL when something went wrong.
718  */
719 static struct most_inst_obj *create_most_inst_obj(const char *name)
720 {
721         struct most_inst_obj *inst;
722         int retval;
723
724         inst = kzalloc(sizeof(*inst), GFP_KERNEL);
725         if (!inst)
726                 return NULL;
727         inst->kobj.kset = most_inst_kset;
728         retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
729                                       "%s", name);
730         if (retval) {
731                 kobject_put(&inst->kobj);
732                 return NULL;
733         }
734         kobject_uevent(&inst->kobj, KOBJ_ADD);
735         return inst;
736 }
737
/**
 * destroy_most_inst_obj - MOST instance release function
 * @inst: pointer to the instance object
 *
 * Drains and drops every channel of the instance, then decrements the
 * reference counter of the instance object itself.  If a reference count
 * turns zero, the corresponding release function is called.
 */
static void destroy_most_inst_obj(struct most_inst_obj *inst)
{
        struct most_c_obj *c, *tmp;

        list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
                /* free all MBOs still owned by the channel ... */
                flush_trash_fifo(c);
                flush_channel_fifos(c);
                /* ... then drop the channel's sysfs object */
                kobject_put(&c->kobj);
        }
        kobject_put(&inst->kobj);
}
756
757 /*                   ___     ___
758  *                   ___A I M___
759  */
/*
 * struct most_aim_obj - sysfs wrapper for a registered AIM driver
 * @kobj: sysfs representation of the AIM
 * @list: link in the global aim_list
 * @driver: the AIM's callbacks/identity as registered with the core
 */
struct most_aim_obj {
        struct kobject kobj;
        struct list_head list;
        struct most_aim *driver;
};
765
766 #define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
767
768 static struct list_head aim_list;
769
/**
 * struct most_aim_attribute - to access the attributes of AIM object
 * @attr: attributes of an AIM
 * @show: pointer to the show function (may be NULL for write-only attrs)
 * @store: pointer to the store function (may be NULL for read-only attrs)
 *
 * Dispatched by aim_attr_show()/aim_attr_store() below.
 */
struct most_aim_attribute {
        struct attribute attr;
        ssize_t (*show)(struct most_aim_obj *d,
                        struct most_aim_attribute *attr,
                        char *buf);
        ssize_t (*store)(struct most_aim_obj *d,
                         struct most_aim_attribute *attr,
                         const char *buf,
                         size_t count);
};
786
787 #define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
788
789 /**
790  * aim_attr_show - show function of an AIM object
791  * @kobj: pointer to kobject
792  * @attr: pointer to attribute struct
793  * @buf: buffer
794  */
795 static ssize_t aim_attr_show(struct kobject *kobj,
796                              struct attribute *attr,
797                              char *buf)
798 {
799         struct most_aim_attribute *aim_attr;
800         struct most_aim_obj *aim_obj;
801
802         aim_attr = to_aim_attr(attr);
803         aim_obj = to_aim_obj(kobj);
804
805         if (!aim_attr->show)
806                 return -EIO;
807
808         return aim_attr->show(aim_obj, aim_attr, buf);
809 }
810
811 /**
812  * aim_attr_store - store function of an AIM object
813  * @kobj: pointer to kobject
814  * @attr: pointer to attribute struct
815  * @buf: buffer
816  * @len: length of buffer
817  */
818 static ssize_t aim_attr_store(struct kobject *kobj,
819                               struct attribute *attr,
820                               const char *buf,
821                               size_t len)
822 {
823         struct most_aim_attribute *aim_attr;
824         struct most_aim_obj *aim_obj;
825
826         aim_attr = to_aim_attr(attr);
827         aim_obj = to_aim_obj(kobj);
828
829         if (!aim_attr->store)
830                 return -EIO;
831         return aim_attr->store(aim_obj, aim_attr, buf, len);
832 }
833
/* sysfs operations for AIM objects */
static const struct sysfs_ops most_aim_sysfs_ops = {
        .show = aim_attr_show,
        .store = aim_attr_store,
};
838
/**
 * most_aim_release - AIM release function
 * @kobj: pointer to AIM's kobject
 *
 * Called by the kobject core once the last reference is gone; frees the
 * embedding AIM object.
 */
static void most_aim_release(struct kobject *kobj)
{
        kfree(to_aim_obj(kobj));
}
849
850 static ssize_t add_link_show(struct most_aim_obj *aim_obj,
851                              struct most_aim_attribute *attr,
852                              char *buf)
853 {
854         struct most_c_obj *c;
855         struct most_inst_obj *i;
856         int offs = 0;
857
858         list_for_each_entry(i, &instance_list, list) {
859                 list_for_each_entry(c, &i->channel_list, list) {
860                         if (c->aim0.ptr == aim_obj->driver ||
861                             c->aim1.ptr == aim_obj->driver) {
862                                 offs += snprintf(buf + offs, PAGE_SIZE - offs,
863                                                  "%s:%s\n",
864                                                  kobject_name(&i->kobj),
865                                                  kobject_name(&c->kobj));
866                         }
867                 }
868         }
869
870         return offs;
871 }
872
/**
 * split_string - parses and changes string in the buffer buf and
 * splits it into two mandatory and one optional substrings.
 *
 * @buf: complete string from attribute 'add_channel'
 * @a: address of pointer to 1st substring (=instance name)
 * @b: address of pointer to 2nd substring (=channel name)
 * @c: optional address of pointer to 3rd substring (=user defined name)
 *
 * Examples:
 *
 * Input: "mdev0:ch6:my_channel\n" or
 *        "mdev0:ch6:my_channel"
 *
 * Output: *a -> "mdev0", *b -> "ch6", *c -> "my_channel"
 *
 * Input: "mdev1:ep81\n"
 * Output: *a -> "mdev1", *b -> "ep81", *c -> ""
 *
 * Input: "mdev1:ep81"
 * Output: *a -> "mdev1", *b -> "ep81", *c == NULL
 */
static int split_string(char *buf, char **a, char **b, char **c)
{
        char *rest = buf;

        /* instance name runs up to the first ':' */
        *a = strsep(&rest, ":");
        if (!*a)
                return -EIO;

        /* channel name ends at ':' or at the trailing newline */
        *b = strsep(&rest, ":\n");
        if (!*b)
                return -EIO;

        /* the third substring is optional and may come back NULL */
        if (c)
                *c = strsep(&rest, ":\n");

        return 0;
}
910
911 /**
912  * get_channel_by_name - get pointer to channel object
913  * @mdev: name of the device instance
914  * @mdev_ch: name of the respective channel
915  *
916  * This retrieves the pointer to a channel object.
917  */
918 static struct
919 most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
920 {
921         struct most_c_obj *c, *tmp;
922         struct most_inst_obj *i, *i_tmp;
923         int found = 0;
924
925         list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
926                 if (!strcmp(kobject_name(&i->kobj), mdev)) {
927                         found++;
928                         break;
929                 }
930         }
931         if (unlikely(!found))
932                 return ERR_PTR(-EIO);
933
934         list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
935                 if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
936                         found++;
937                         break;
938                 }
939         }
940         if (unlikely(found < 2))
941                 return ERR_PTR(-EIO);
942         return c;
943 }
944
/**
 * add_link_store - store() function for add_link attribute
 * @aim_obj: pointer to AIM object
 * @attr: its attributes
 * @buf: buffer
 * @len: buffer length
 *
 * This parses the string given by buf and splits it into
 * three substrings. Note: third substring is optional. In case a cdev
 * AIM is loaded the optional 3rd substring will make up the name of
 * device node in the /dev directory. If omitted, the device node will
 * inherit the channel's name within sysfs.
 *
 * Searches for a pair of device and channel and probes the AIM
 *
 * Example:
 * (1) echo "mdev0:ch6:my_rxchannel" >add_link
 * (2) echo "mdev1:ep81" >add_link
 *
 * (1) would create the device node /dev/my_rxchannel
 * (2) would create the device node /dev/mdev1-ep81
 */
static ssize_t add_link_store(struct most_aim_obj *aim_obj,
			      struct most_aim_attribute *attr,
			      const char *buf,
			      size_t len)
{
	struct most_c_obj *c;
	struct most_aim **aim_ptr;
	char buffer[STRING_SIZE];
	char *mdev;
	char *mdev_ch;
	char *mdev_devnod;
	char devnod_buf[STRING_SIZE];
	int ret;
	/* bound the copy: sysfs input is not guaranteed to fit STRING_SIZE */
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);

	ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
	if (ret)
		return ret;

	/* no 3rd substring given: device node defaults to "<mdev>-<channel>" */
	if (!mdev_devnod || *mdev_devnod == 0) {
		snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
			 mdev_ch);
		mdev_devnod = devnod_buf;
	}

	c = get_channel_by_name(mdev, mdev_ch);
	if (IS_ERR(c))
		return -ENODEV;

	/* take the first free AIM slot; a channel holds at most two AIMs */
	if (!c->aim0.ptr)
		aim_ptr = &c->aim0.ptr;
	else if (!c->aim1.ptr)
		aim_ptr = &c->aim1.ptr;
	else
		return -ENOSPC;

	*aim_ptr = aim_obj->driver;
	ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
					     &c->cfg, &c->kobj, mdev_devnod);
	if (ret) {
		/* roll back the link if the AIM refused the channel */
		*aim_ptr = NULL;
		return ret;
	}

	return len;
}
1015
/* sysfs attribute 'add_link'; readable and writable via add_link_show()
 * and add_link_store()
 */
static struct most_aim_attribute most_aim_attr_add_link =
	__ATTR_RW(add_link);
1018
1019 /**
1020  * store_remove_link - store function for remove_link attribute
1021  * @aim_obj: pointer to AIM object
1022  * @attr: its attributes
1023  * @buf: buffer
1024  * @len: buffer length
1025  *
1026  * Example:
1027  * echo "mdev0:ep81" >remove_link
1028  */
1029 static ssize_t remove_link_store(struct most_aim_obj *aim_obj,
1030                                  struct most_aim_attribute *attr,
1031                                  const char *buf,
1032                                  size_t len)
1033 {
1034         struct most_c_obj *c;
1035         char buffer[STRING_SIZE];
1036         char *mdev;
1037         char *mdev_ch;
1038         int ret;
1039         size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
1040
1041         strlcpy(buffer, buf, max_len);
1042         ret = split_string(buffer, &mdev, &mdev_ch, NULL);
1043         if (ret)
1044                 return ret;
1045
1046         c = get_channel_by_name(mdev, mdev_ch);
1047         if (IS_ERR(c))
1048                 return -ENODEV;
1049
1050         if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
1051                 return -EIO;
1052         if (c->aim0.ptr == aim_obj->driver)
1053                 c->aim0.ptr = NULL;
1054         if (c->aim1.ptr == aim_obj->driver)
1055                 c->aim1.ptr = NULL;
1056         return len;
1057 }
1058
/* sysfs attribute 'remove_link', write-only (see remove_link_store()) */
static struct most_aim_attribute most_aim_attr_remove_link =
	__ATTR_WO(remove_link);

/* default attributes attached to every AIM kobject */
static struct attribute *most_aim_def_attrs[] = {
	&most_aim_attr_add_link.attr,
	&most_aim_attr_remove_link.attr,
	NULL,
};

/* kobject type tying together sysfs ops, release and default attrs */
static struct kobj_type most_aim_ktype = {
	.sysfs_ops = &most_aim_sysfs_ops,
	.release = most_aim_release,
	.default_attrs = most_aim_def_attrs,
};

/* kset grouping all registered AIMs under one sysfs directory */
static struct kset *most_aim_kset;
1075
1076 /**
1077  * create_most_aim_obj - creates an AIM object
1078  * @name: name of the AIM
1079  *
1080  * This creates an AIM object assigns the proper kset and registers
1081  * it with sysfs.
1082  * Returns a pointer to the object or NULL if something went wrong.
1083  */
1084 static struct most_aim_obj *create_most_aim_obj(const char *name)
1085 {
1086         struct most_aim_obj *most_aim;
1087         int retval;
1088
1089         most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
1090         if (!most_aim)
1091                 return NULL;
1092         most_aim->kobj.kset = most_aim_kset;
1093         retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
1094                                       NULL, "%s", name);
1095         if (retval) {
1096                 kobject_put(&most_aim->kobj);
1097                 return NULL;
1098         }
1099         kobject_uevent(&most_aim->kobj, KOBJ_ADD);
1100         return most_aim;
1101 }
1102
/**
 * destroy_most_aim_obj - AIM release function
 * @p: pointer to AIM object
 *
 * This decrements the reference counter of the AIM object. If the
 * reference count turns zero, its release function (most_aim_release(),
 * which frees the object) will be called.
 */
static void destroy_most_aim_obj(struct most_aim_obj *p)
{
	kobject_put(&p->kobj);
}
1114
1115 /*                   ___       ___
1116  *                   ___C O R E___
1117  */
1118
/**
 * Instantiation of the MOST bus
 */
static struct bus_type most_bus = {
	.name = "most",
};

/**
 * Instantiation of the core driver, attached to the MOST bus above
 */
static struct device_driver mostcore = {
	.name = "mostcore",
	.bus = &most_bus,
};
1133
1134 static inline void trash_mbo(struct mbo *mbo)
1135 {
1136         unsigned long flags;
1137         struct most_c_obj *c = mbo->context;
1138
1139         spin_lock_irqsave(&c->fifo_lock, flags);
1140         list_add(&mbo->list, &c->trash_fifo);
1141         spin_unlock_irqrestore(&c->fifo_lock, flags);
1142 }
1143
1144 static bool hdm_mbo_ready(struct most_c_obj *c)
1145 {
1146         bool empty;
1147
1148         if (c->enqueue_halt)
1149                 return false;
1150
1151         spin_lock_irq(&c->fifo_lock);
1152         empty = list_empty(&c->halt_fifo);
1153         spin_unlock_irq(&c->fifo_lock);
1154
1155         return !empty;
1156 }
1157
1158 static void nq_hdm_mbo(struct mbo *mbo)
1159 {
1160         unsigned long flags;
1161         struct most_c_obj *c = mbo->context;
1162
1163         spin_lock_irqsave(&c->fifo_lock, flags);
1164         list_add_tail(&mbo->list, &c->halt_fifo);
1165         spin_unlock_irqrestore(&c->fifo_lock, flags);
1166         wake_up_interruptible(&c->hdm_fifo_wq);
1167 }
1168
/*
 * hdm_enqueue_thread - per-channel kthread feeding MBOs to the HDM
 * @data: channel object (struct most_c_obj *)
 *
 * Sleeps until hdm_mbo_ready() reports work or the thread is asked to
 * stop. nq_mutex is held across the enqueue call to serialize with
 * most_stop_enqueue()/most_resume_enqueue(); halt flag and fifo state
 * are re-checked under fifo_lock before an MBO is popped.
 */
static int hdm_enqueue_thread(void *data)
{
	struct most_c_obj *c = data;
	struct mbo *mbo;
	int ret;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 hdm_mbo_ready(c) ||
					 kthread_should_stop());

		mutex_lock(&c->nq_mutex);
		spin_lock_irq(&c->fifo_lock);
		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
			/* lost the race against a halt request; wait again */
			spin_unlock_irq(&c->fifo_lock);
			mutex_unlock(&c->nq_mutex);
			continue;
		}

		mbo = list_pop_mbo(&c->halt_fifo);
		spin_unlock_irq(&c->fifo_lock);

		/* present the full buffer to the hardware on Rx */
		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
		mutex_unlock(&c->nq_mutex);

		if (unlikely(ret)) {
			pr_err("hdm enqueue failed\n");
			nq_hdm_mbo(mbo);
			/* give up: the thread exits and clears its handle */
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}
1208
1209 static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
1210 {
1211         struct task_struct *task =
1212                 kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
1213                             channel_id);
1214
1215         if (IS_ERR(task))
1216                 return PTR_ERR(task);
1217
1218         c->hdm_enqueue_task = task;
1219         return 0;
1220 }
1221
/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: buffer object
 *
 * This puts an MBO back to the list to have it ready for up coming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached AIM.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_c_obj *c;

	BUG_ON((!mbo) || (!mbo->context));
	c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	/* credit the buffer back to the AIM it was handed out to */
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	/* notify every attached AIM that registered a tx callback */
	if (c->aim0.refs && c->aim0.ptr->tx_completion)
		c->aim0.ptr->tx_completion(c->iface, c->channel_id);

	if (c->aim1.refs && c->aim1.ptr->tx_completion)
		c->aim1.ptr->tx_completion(c->iface, c->channel_id);
}
1257
/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs; on allocation
 * failure this may be fewer than c->cfg.num_buffers (0 means nothing
 * could be armed at all).
 */
static int arm_mbo_chain(struct most_c_obj *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	int retval;
	struct mbo *mbo;
	/* extra_len accounts for HDM-specific headroom per buffer */
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo) {
			/* report how many buffers were armed so far */
			retval = i;
			goto _exit;
		}
		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		mbo->virt_address = dma_alloc_coherent(NULL,
						       coherent_buf_size,
						       &mbo->bus_address,
						       GFP_KERNEL);
		if (!mbo->virt_address) {
			pr_info("WARN: No DMA coherent buffer.\n");
			retval = i;
			goto _error1;
		}
		mbo->complete = compl;
		/* not yet assigned to an AIM; see most_get_mbo() */
		mbo->num_buffers_ptr = &dummy_num_buffers;
		if (dir == MOST_CH_RX) {
			/* Rx buffers go straight to the enqueue thread */
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			arm_mbo(mbo);
		}
	}
	return i;

_error1:
	kfree(mbo);
_exit:
	return retval;
}
1315
/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: pointer to the MBO
 *
 * Hands the buffer to the channel's enqueue fifo, from where the
 * enqueue thread passes it on to the HDM. Warns once and bails out
 * when the MBO or its channel reference is missing.
 */
void most_submit_mbo(struct mbo *mbo)
{
	if (WARN_ONCE(!mbo || !mbo->context,
		      "bad mbo or missing channel reference\n"))
		return;

	nq_hdm_mbo(mbo);
}
EXPORT_SYMBOL_GPL(most_submit_mbo);
1329
1330 /**
1331  * most_write_completion - write completion handler
1332  * @mbo: pointer to MBO
1333  *
1334  * This recycles the MBO for further usage. In case the channel has been
1335  * poisoned, the MBO is scheduled to be trashed.
1336  */
1337 static void most_write_completion(struct mbo *mbo)
1338 {
1339         struct most_c_obj *c;
1340
1341         BUG_ON((!mbo) || (!mbo->context));
1342
1343         c = mbo->context;
1344         if (mbo->status == MBO_E_INVAL)
1345                 pr_info("WARN: Tx MBO status: invalid\n");
1346         if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
1347                 trash_mbo(mbo);
1348         else
1349                 arm_mbo(mbo);
1350 }
1351
1352 /**
1353  * get_channel_by_iface - get pointer to channel object
1354  * @iface: pointer to interface instance
1355  * @id: channel ID
1356  *
1357  * This retrieves a pointer to a channel of the given interface and channel ID.
1358  */
1359 static struct
1360 most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
1361 {
1362         struct most_inst_obj *i;
1363
1364         if (unlikely(!iface)) {
1365                 pr_err("Bad interface\n");
1366                 return NULL;
1367         }
1368         if (unlikely((id < 0) || (id >= iface->num_channels))) {
1369                 pr_err("Channel index (%d) out of range\n", id);
1370                 return NULL;
1371         }
1372         i = iface->priv;
1373         if (unlikely(!i)) {
1374                 pr_err("interface is not registered\n");
1375                 return NULL;
1376         }
1377         return i->channel[id];
1378 }
1379
1380 int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
1381 {
1382         struct most_c_obj *c = get_channel_by_iface(iface, id);
1383         unsigned long flags;
1384         int empty;
1385
1386         if (unlikely(!c))
1387                 return -EINVAL;
1388
1389         if (c->aim0.refs && c->aim1.refs &&
1390             ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
1391              (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
1392                 return 0;
1393
1394         spin_lock_irqsave(&c->fifo_lock, flags);
1395         empty = list_empty(&c->fifo);
1396         spin_unlock_irqrestore(&c->fifo_lock, flags);
1397         return !empty;
1398 }
1399 EXPORT_SYMBOL_GPL(channel_has_mbo);
1400
1401 /**
1402  * most_get_mbo - get pointer to an MBO of pool
1403  * @iface: pointer to interface instance
1404  * @id: channel ID
1405  *
1406  * This attempts to get a free buffer out of the channel fifo.
1407  * Returns a pointer to MBO on success or NULL otherwise.
1408  */
1409 struct mbo *most_get_mbo(struct most_interface *iface, int id,
1410                          struct most_aim *aim)
1411 {
1412         struct mbo *mbo;
1413         struct most_c_obj *c;
1414         unsigned long flags;
1415         int *num_buffers_ptr;
1416
1417         c = get_channel_by_iface(iface, id);
1418         if (unlikely(!c))
1419                 return NULL;
1420
1421         if (c->aim0.refs && c->aim1.refs &&
1422             ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
1423              (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
1424                 return NULL;
1425
1426         if (aim == c->aim0.ptr)
1427                 num_buffers_ptr = &c->aim0.num_buffers;
1428         else if (aim == c->aim1.ptr)
1429                 num_buffers_ptr = &c->aim1.num_buffers;
1430         else
1431                 num_buffers_ptr = &dummy_num_buffers;
1432
1433         spin_lock_irqsave(&c->fifo_lock, flags);
1434         if (list_empty(&c->fifo)) {
1435                 spin_unlock_irqrestore(&c->fifo_lock, flags);
1436                 return NULL;
1437         }
1438         mbo = list_pop_mbo(&c->fifo);
1439         --*num_buffers_ptr;
1440         spin_unlock_irqrestore(&c->fifo_lock, flags);
1441
1442         mbo->num_buffers_ptr = num_buffers_ptr;
1443         mbo->buffer_length = c->cfg.buffer_size;
1444         return mbo;
1445 }
1446 EXPORT_SYMBOL_GPL(most_get_mbo);
1447
1448 /**
1449  * most_put_mbo - return buffer to pool
1450  * @mbo: buffer object
1451  */
1452 void most_put_mbo(struct mbo *mbo)
1453 {
1454         struct most_c_obj *c = mbo->context;
1455
1456         if (c->cfg.direction == MOST_CH_TX) {
1457                 arm_mbo(mbo);
1458                 return;
1459         }
1460         nq_hdm_mbo(mbo);
1461         atomic_inc(&c->mbo_nq_level);
1462 }
1463 EXPORT_SYMBOL_GPL(most_put_mbo);
1464
/**
 * most_read_completion - read completion handler
 * @mbo: pointer to MBO
 *
 * This function is called by the HDM when data has been received from the
 * hardware and copied to the buffer of the MBO.
 *
 * In case the channel has been poisoned it puts the buffer in the trash queue.
 * Otherwise, it passes the buffer to an AIM for further processing.
 */
static void most_read_completion(struct mbo *mbo)
{
	struct most_c_obj *c = mbo->context;

	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
		trash_mbo(mbo);
		return;
	}

	if (mbo->status == MBO_E_INVAL) {
		/* invalid transfer: recycle the buffer to the hardware */
		nq_hdm_mbo(mbo);
		atomic_inc(&c->mbo_nq_level);
		return;
	}

	/* flag the channel starving when the last queued Rx buffer is used */
	if (atomic_sub_and_test(1, &c->mbo_nq_level))
		c->is_starving = 1;

	/* the first AIM whose rx_completion returns 0 takes the buffer */
	if (c->aim0.refs && c->aim0.ptr->rx_completion &&
	    c->aim0.ptr->rx_completion(mbo) == 0)
		return;

	if (c->aim1.refs && c->aim1.ptr->rx_completion &&
	    c->aim1.ptr->rx_completion(mbo) == 0)
		return;

	/* nobody took it: hand the buffer back to the pool */
	most_put_mbo(mbo);
}
1503
/**
 * most_start_channel - prepares a channel for communication
 * @iface: pointer to interface instance
 * @id: channel ID
 * @aim: AIM that requests the channel
 *
 * This prepares the channel for usage. Cross-checks whether the
 * channel's been properly configured. The first AIM starting the
 * channel does the full setup; subsequent AIMs only take a reference.
 *
 * Returns 0 on success or error code otherwise.
 */
int most_start_channel(struct most_interface *iface, int id,
		       struct most_aim *aim)
{
	int num_buffer;
	int ret;
	struct most_c_obj *c = get_channel_by_iface(iface, id);

	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->aim0.refs + c->aim1.refs > 0)
		goto out; /* already started by other aim */

	/* pin the HDM module for as long as the channel is running */
	if (!try_module_get(iface->mod)) {
		pr_info("failed to acquire HDM lock\n");
		mutex_unlock(&c->start_mutex);
		return -ENOLCK;
	}

	c->cfg.extra_len = 0;
	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
		pr_info("channel configuration failed. Go check settings...\n");
		ret = -EINVAL;
		goto error;
	}

	init_waitqueue_head(&c->hdm_fifo_wq);

	if (c->cfg.direction == MOST_CH_RX)
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_read_completion);
	else
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_write_completion);
	if (unlikely(!num_buffer)) {
		pr_info("failed to allocate memory\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = run_enqueue_thread(c, id);
	if (ret)
		goto error;

	c->is_starving = 0;
	/* split the buffer budget between the two possible AIMs */
	c->aim0.num_buffers = c->cfg.num_buffers / 2;
	c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
	atomic_set(&c->mbo_ref, num_buffer);

out:
	/* account the requesting AIM's reference on the channel */
	if (aim == c->aim0.ptr)
		c->aim0.refs++;
	if (aim == c->aim1.ptr)
		c->aim1.refs++;
	mutex_unlock(&c->start_mutex);
	return 0;

error:
	module_put(iface->mod);
	mutex_unlock(&c->start_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);
1578
/**
 * most_stop_channel - stops a running channel
 * @iface: pointer to interface instance
 * @id: channel ID
 * @aim: AIM that releases the channel
 *
 * Drops the AIM's reference on the channel; the last user performs
 * the actual shutdown: the enqueue thread is stopped, the channel is
 * poisoned and all fifos are flushed.
 */
int most_stop_channel(struct most_interface *iface, int id,
		      struct most_aim *aim)
{
	struct most_c_obj *c;

	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
		pr_err("Bad interface or index out of range\n");
		return -EINVAL;
	}
	c = get_channel_by_iface(iface, id);
	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	/* another AIM still uses the channel: only drop the reference */
	if (c->aim0.refs + c->aim1.refs >= 2)
		goto out;

	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;

	if (iface->mod)
		module_put(iface->mod);

	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
		       c->iface->description);
		mutex_unlock(&c->start_mutex);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		pr_info("Interrupted while clean up ch %d\n", c->channel_id);
		mutex_unlock(&c->start_mutex);
		return -EINTR;
	}
#else
	/* wait until the HDM has returned all buffers of the channel */
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;

out:
	if (aim == c->aim0.ptr)
		c->aim0.refs--;
	if (aim == c->aim1.ptr)
		c->aim1.refs--;
	mutex_unlock(&c->start_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);
1638
1639 /**
1640  * most_register_aim - registers an AIM (driver) with the core
1641  * @aim: instance of AIM to be registered
1642  */
1643 int most_register_aim(struct most_aim *aim)
1644 {
1645         struct most_aim_obj *aim_obj;
1646
1647         if (!aim) {
1648                 pr_err("Bad driver\n");
1649                 return -EINVAL;
1650         }
1651         aim_obj = create_most_aim_obj(aim->name);
1652         if (!aim_obj) {
1653                 pr_info("failed to alloc driver object\n");
1654                 return -ENOMEM;
1655         }
1656         aim_obj->driver = aim;
1657         aim->context = aim_obj;
1658         pr_info("registered new application interfacing module %s\n",
1659                 aim->name);
1660         list_add_tail(&aim_obj->list, &aim_list);
1661         return 0;
1662 }
1663 EXPORT_SYMBOL_GPL(most_register_aim);
1664
1665 /**
1666  * most_deregister_aim - deregisters an AIM (driver) with the core
1667  * @aim: AIM to be removed
1668  */
1669 int most_deregister_aim(struct most_aim *aim)
1670 {
1671         struct most_aim_obj *aim_obj;
1672         struct most_c_obj *c, *tmp;
1673         struct most_inst_obj *i, *i_tmp;
1674
1675         if (!aim) {
1676                 pr_err("Bad driver\n");
1677                 return -EINVAL;
1678         }
1679
1680         aim_obj = aim->context;
1681         if (!aim_obj) {
1682                 pr_info("driver not registered.\n");
1683                 return -EINVAL;
1684         }
1685         list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
1686                 list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
1687                         if (c->aim0.ptr == aim || c->aim1.ptr == aim)
1688                                 aim->disconnect_channel(
1689                                         c->iface, c->channel_id);
1690                         if (c->aim0.ptr == aim)
1691                                 c->aim0.ptr = NULL;
1692                         if (c->aim1.ptr == aim)
1693                                 c->aim1.ptr = NULL;
1694                 }
1695         }
1696         list_del(&aim_obj->list);
1697         destroy_most_aim_obj(aim_obj);
1698         pr_info("deregistering application interfacing module %s\n", aim->name);
1699         return 0;
1700 }
1701 EXPORT_SYMBOL_GPL(most_deregister_aim);
1702
/**
 * most_register_interface - registers an interface with core
 * @iface: pointer to the instance of the interface description.
 *
 * Allocates and initializes a new interface instance and all of its channels.
 * Returns a pointer to kobject or an error pointer.
 */
struct kobject *most_register_interface(struct most_interface *iface)
{
	unsigned int i;
	int id;
	char name[STRING_SIZE];
	char channel_name[STRING_SIZE];
	struct most_c_obj *c;
	struct most_inst_obj *inst;

	/* the HDM must provide enqueue/configure/poison callbacks */
	if (!iface || !iface->enqueue || !iface->configure ||
	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
		pr_err("Bad interface or channel overflow\n");
		return ERR_PTR(-EINVAL);
	}

	/* grab a unique device id, reflected in the "mdevN" sysfs name */
	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
	if (id < 0) {
		pr_info("Failed to alloc mdev ID\n");
		return ERR_PTR(id);
	}
	snprintf(name, STRING_SIZE, "mdev%d", id);

	inst = create_most_inst_obj(name);
	if (!inst) {
		pr_info("Failed to allocate interface instance\n");
		ida_simple_remove(&mdev_id, id);
		return ERR_PTR(-ENOMEM);
	}

	iface->priv = inst;
	INIT_LIST_HEAD(&inst->channel_list);
	inst->iface = iface;
	inst->dev_id = id;
	list_add_tail(&inst->list, &instance_list);

	for (i = 0; i < iface->num_channels; i++) {
		const char *name_suffix = iface->channel_vector[i].name_suffix;

		/* channel name: HDM provided suffix or generic "chN" */
		if (!name_suffix)
			snprintf(channel_name, STRING_SIZE, "ch%d", i);
		else
			snprintf(channel_name, STRING_SIZE, "%s", name_suffix);

		/* this increments the reference count of this instance */
		c = create_most_c_obj(channel_name, &inst->kobj);
		if (!c)
			goto free_instance;
		inst->channel[i] = c;
		c->is_starving = 0;
		c->iface = iface;
		c->inst = inst;
		c->channel_id = i;
		c->keep_mbo = false;
		c->enqueue_halt = false;
		c->is_poisoned = false;
		/* configuration is filled in later via sysfs */
		c->cfg.direction = 0;
		c->cfg.data_type = 0;
		c->cfg.num_buffers = 0;
		c->cfg.buffer_size = 0;
		c->cfg.subbuffer_size = 0;
		c->cfg.packets_per_xact = 0;
		spin_lock_init(&c->fifo_lock);
		INIT_LIST_HEAD(&c->fifo);
		INIT_LIST_HEAD(&c->trash_fifo);
		INIT_LIST_HEAD(&c->halt_fifo);
		init_completion(&c->cleanup);
		atomic_set(&c->mbo_ref, 0);
		mutex_init(&c->start_mutex);
		mutex_init(&c->nq_mutex);
		list_add_tail(&c->list, &inst->channel_list);
	}
	pr_info("registered new MOST device mdev%d (%s)\n",
		inst->dev_id, iface->description);
	return &inst->kobj;

free_instance:
	/* tears down the instance and the channels created so far */
	pr_info("Failed allocate channel(s)\n");
	list_del(&inst->list);
	ida_simple_remove(&mdev_id, id);
	destroy_most_inst_obj(inst);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(most_register_interface);
1793
1794 /**
1795  * most_deregister_interface - deregisters an interface with core
1796  * @iface: pointer to the interface instance description.
1797  *
1798  * Before removing an interface instance from the list, all running
1799  * channels are stopped and poisoned.
1800  */
1801 void most_deregister_interface(struct most_interface *iface)
1802 {
1803         struct most_inst_obj *i = iface->priv;
1804         struct most_c_obj *c;
1805
1806         if (unlikely(!i)) {
1807                 pr_info("Bad Interface\n");
1808                 return;
1809         }
1810         pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
1811                 iface->description);
1812
1813         list_for_each_entry(c, &i->channel_list, list) {
1814                 if (c->aim0.ptr)
1815                         c->aim0.ptr->disconnect_channel(c->iface,
1816                                                         c->channel_id);
1817                 if (c->aim1.ptr)
1818                         c->aim1.ptr->disconnect_channel(c->iface,
1819                                                         c->channel_id);
1820                 c->aim0.ptr = NULL;
1821                 c->aim1.ptr = NULL;
1822         }
1823
1824         ida_simple_remove(&mdev_id, i->dev_id);
1825         list_del(&i->list);
1826         destroy_most_inst_obj(i);
1827 }
1828 EXPORT_SYMBOL_GPL(most_deregister_interface);
1829
1830 /**
1831  * most_stop_enqueue - prevents core from enqueueing MBOs
1832  * @iface: pointer to interface
1833  * @id: channel id
1834  *
1835  * This is called by an HDM that _cannot_ attend to its duties and
1836  * is imminent to get run over by the core. The core is not going to
1837  * enqueue any further packets unless the flagging HDM calls
1838  * most_resume enqueue().
1839  */
1840 void most_stop_enqueue(struct most_interface *iface, int id)
1841 {
1842         struct most_c_obj *c = get_channel_by_iface(iface, id);
1843
1844         if (!c)
1845                 return;
1846
1847         mutex_lock(&c->nq_mutex);
1848         c->enqueue_halt = true;
1849         mutex_unlock(&c->nq_mutex);
1850 }
1851 EXPORT_SYMBOL_GPL(most_stop_enqueue);
1852
1853 /**
1854  * most_resume_enqueue - allow core to enqueue MBOs again
1855  * @iface: pointer to interface
1856  * @id: channel id
1857  *
1858  * This clears the enqueue halt flag and enqueues all MBOs currently
1859  * sitting in the wait fifo.
1860  */
1861 void most_resume_enqueue(struct most_interface *iface, int id)
1862 {
1863         struct most_c_obj *c = get_channel_by_iface(iface, id);
1864
1865         if (!c)
1866                 return;
1867
1868         mutex_lock(&c->nq_mutex);
1869         c->enqueue_halt = false;
1870         mutex_unlock(&c->nq_mutex);
1871
1872         wake_up_interruptible(&c->hdm_fifo_wq);
1873 }
1874 EXPORT_SYMBOL_GPL(most_resume_enqueue);
1875
1876 static int __init most_init(void)
1877 {
1878         int err;
1879
1880         pr_info("init()\n");
1881         INIT_LIST_HEAD(&instance_list);
1882         INIT_LIST_HEAD(&aim_list);
1883         ida_init(&mdev_id);
1884
1885         err = bus_register(&most_bus);
1886         if (err) {
1887                 pr_info("Cannot register most bus\n");
1888                 return err;
1889         }
1890
1891         most_class = class_create(THIS_MODULE, "most");
1892         if (IS_ERR(most_class)) {
1893                 pr_info("No udev support.\n");
1894                 err = PTR_ERR(most_class);
1895                 goto exit_bus;
1896         }
1897
1898         err = driver_register(&mostcore);
1899         if (err) {
1900                 pr_info("Cannot register core driver\n");
1901                 goto exit_class;
1902         }
1903
1904         core_dev = device_create(most_class, NULL, 0, NULL, "mostcore");
1905         if (IS_ERR(core_dev)) {
1906                 err = PTR_ERR(core_dev);
1907                 goto exit_driver;
1908         }
1909
1910         most_aim_kset = kset_create_and_add("aims", NULL, &core_dev->kobj);
1911         if (!most_aim_kset) {
1912                 err = -ENOMEM;
1913                 goto exit_class_container;
1914         }
1915
1916         most_inst_kset = kset_create_and_add("devices", NULL, &core_dev->kobj);
1917         if (!most_inst_kset) {
1918                 err = -ENOMEM;
1919                 goto exit_driver_kset;
1920         }
1921
1922         return 0;
1923
1924 exit_driver_kset:
1925         kset_unregister(most_aim_kset);
1926 exit_class_container:
1927         device_destroy(most_class, 0);
1928 exit_driver:
1929         driver_unregister(&mostcore);
1930 exit_class:
1931         class_destroy(most_class);
1932 exit_bus:
1933         bus_unregister(&most_bus);
1934         return err;
1935 }
1936
1937 static void __exit most_exit(void)
1938 {
1939         struct most_inst_obj *i, *i_tmp;
1940         struct most_aim_obj *d, *d_tmp;
1941
1942         pr_info("exit core module\n");
1943         list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
1944                 destroy_most_aim_obj(d);
1945         }
1946
1947         list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
1948                 list_del(&i->list);
1949                 destroy_most_inst_obj(i);
1950         }
1951         kset_unregister(most_inst_kset);
1952         kset_unregister(most_aim_kset);
1953         device_destroy(most_class, 0);
1954         driver_unregister(&mostcore);
1955         class_destroy(most_class);
1956         bus_unregister(&most_bus);
1957         ida_destroy(&mdev_id);
1958 }
1959
/* Module entry/exit hooks and metadata. */
module_init(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");