2 * core.c - Implementation of core module of MOST Linux driver stack
4 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * This file is licensed under GPLv2.
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/device.h>
20 #include <linux/list.h>
21 #include <linux/poll.h>
22 #include <linux/wait.h>
23 #include <linux/kobject.h>
24 #include <linux/mutex.h>
25 #include <linux/completion.h>
26 #include <linux/sysfs.h>
27 #include <linux/kthread.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/idr.h>
/* NOTE(review): this chunk appears line-sampled; statements/braces are
 * missing between consecutive lines throughout the file. Comments below
 * describe only what the visible lines establish.
 */
/* Upper bound for channels per interface; sizes most_inst_obj::channel[]. */
32 #define MAX_CHANNELS 64
/* Scratch-buffer size used when parsing add_link/remove_link input. */
33 #define STRING_SIZE 80
/* Device class and core device for the MOST core. */
35 static struct class *most_class;
36 static struct device *core_dev;
/* ID allocator for device-instance names (mdev0, mdev1, ...). */
37 static struct ida mdev_id;
/* Fallback counter used when an MBO is not accounted to a specific AIM. */
38 static int dummy_num_buffers;
/* Per-AIM link state embedded in a channel object (fields not all visible
 * here; aim0/aim1 below are instances of this type). */
40 struct most_c_aim_obj {
/* The fields from here on belong to the channel object (struct most_c_obj,
 * per the to_c_obj() macro below); the struct header line is not visible
 * in this chunk. */
/* Signalled when the last MBO reference of the channel is dropped. */
48 struct completion cleanup;
/* Number of MBOs currently queued toward the HDM (Rx level tracking). */
50 atomic_t mbo_nq_level;
/* Serializes channel start/stop. */
53 struct mutex start_mutex;
54 struct mutex nq_mutex; /* nq thread synchronization */
/* Owning hardware interface and device instance. */
56 struct most_interface *iface;
57 struct most_inst_obj *inst;
/* Current channel configuration (buffer sizes, direction, data type). */
58 struct most_channel_config cfg;
/* Pool of ready MBOs handed out via most_get_mbo(). */
61 struct list_head fifo;
/* MBOs waiting to be enqueued into the HDM by hdm_enqueue_thread(). */
63 struct list_head halt_fifo;
/* Linkage into the instance's channel_list. */
64 struct list_head list;
/* Up to two AIMs may be linked to one channel. */
65 struct most_c_aim_obj aim0;
66 struct most_c_aim_obj aim1;
/* MBOs of a poisoned channel parked here until flush_trash_fifo(). */
67 struct list_head trash_fifo;
/* Kthread that feeds halt_fifo MBOs into the HDM. */
68 struct task_struct *hdm_enqueue_task;
69 wait_queue_head_t hdm_fifo_wq;
/* Map an embedded kobject back to its channel object. */
72 #define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
/* Per-interface device instance: owns the channel list and an ID->channel
 * lookup table (body incomplete in this chunk). */
74 struct most_inst_obj {
76 struct most_interface *iface;
77 struct list_head channel_list;
/* Direct lookup by channel id; bounded by MAX_CHANNELS. */
78 struct most_c_obj *channel[MAX_CHANNELS];
/* Linkage into the global instance_list. */
80 struct list_head list;
/* Table mapping channel data-type flags to the sysfs strings accepted/
 * emitted by the set_datatype attribute (table header not visible here).
 * Note the trailing '\n' — entries are compared against raw sysfs input. */
84 int most_ch_data_type;
87 { MOST_CH_CONTROL, "control\n" },
88 { MOST_CH_ASYNC, "async\n" },
89 { MOST_CH_SYNC, "sync\n" },
/* "isoc_avp" is kept as a legacy alias for MOST_CH_ISOC. */
90 { MOST_CH_ISOC, "isoc\n"},
91 { MOST_CH_ISOC, "isoc_avp\n"},
/* Map an embedded kobject back to its instance object. */
94 #define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
97 * list_pop_mbo - retrieves the first MBO of the list and removes it
98 * @ptr: the list head to grab the MBO from.
/* Caller must hold the protecting fifo_lock; list must be non-empty. */
100 #define list_pop_mbo(ptr) \
102 struct mbo *_mbo = list_first_entry(ptr, struct mbo, list); \
103 list_del(&_mbo->list); \
108 * ___C H A N N E L___
112 * struct most_c_attr - to access the attributes of a channel object
113 * @attr: attributes of a channel
114 * @show: pointer to the show function
115 * @store: pointer to the store function
118 struct attribute attr;
119 ssize_t (*show)(struct most_c_obj *d,
120 struct most_c_attr *attr,
122 ssize_t (*store)(struct most_c_obj *d,
123 struct most_c_attr *attr,
/* Map a generic struct attribute back to its channel attribute wrapper. */
128 #define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
/* Declare a named channel attribute with the given mode and callbacks. */
130 #define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
131 struct most_c_attr most_chnl_attr_##_name = \
132 __ATTR(_name, _mode, _show, _store)
135 * channel_attr_show - show function of channel object
136 * @kobj: pointer to its kobject
137 * @attr: pointer to its attributes
/* Generic sysfs show dispatcher: forwards to the attribute's own ->show
 * after resolving the channel object from the kobject. */
140 static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
143 struct most_c_attr *channel_attr = to_channel_attr(attr);
144 struct most_c_obj *c_obj = to_c_obj(kobj);
/* Attributes without a ->show callback are rejected (error return line
 * not visible in this chunk). */
146 if (!channel_attr->show)
149 return channel_attr->show(c_obj, channel_attr, buf);
153 * channel_attr_store - store function of channel object
154 * @kobj: pointer to its kobject
155 * @attr: pointer to its attributes
157 * @len: length of buffer
/* Generic sysfs store dispatcher, mirror of channel_attr_show(). */
159 static ssize_t channel_attr_store(struct kobject *kobj,
160 struct attribute *attr,
164 struct most_c_attr *channel_attr = to_channel_attr(attr);
165 struct most_c_obj *c_obj = to_c_obj(kobj);
167 if (!channel_attr->store)
169 return channel_attr->store(c_obj, channel_attr, buf, len);
/* sysfs ops shared by all channel kobjects. */
172 static const struct sysfs_ops most_channel_sysfs_ops = {
173 .show = channel_attr_show,
174 .store = channel_attr_store,
178 * most_free_mbo_coherent - free an MBO and its coherent buffer
179 * @mbo: buffer to be released
182 static void most_free_mbo_coherent(struct mbo *mbo)
184 struct most_c_obj *c = mbo->context;
/* Coherent buffer covers payload plus HDM-specific extra bytes. */
185 u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
187 dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
/* Last MBO gone -> wake up whoever waits in channel teardown. */
190 if (atomic_sub_and_test(1, &c->mbo_ref))
191 complete(&c->cleanup);
195 * flush_channel_fifos - clear the channel fifos
196 * @c: pointer to channel object
198 static void flush_channel_fifos(struct most_c_obj *c)
200 unsigned long flags, hf_flags;
201 struct mbo *mbo, *tmp;
203 if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
/* Drain the ready fifo. The lock is dropped around each free because
 * most_free_mbo_coherent() must not run under the spinlock. */
206 spin_lock_irqsave(&c->fifo_lock, flags);
207 list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
208 list_del(&mbo->list);
209 spin_unlock_irqrestore(&c->fifo_lock, flags);
210 most_free_mbo_coherent(mbo);
211 spin_lock_irqsave(&c->fifo_lock, flags);
213 spin_unlock_irqrestore(&c->fifo_lock, flags);
/* Same drain pattern for the halt fifo. */
215 spin_lock_irqsave(&c->fifo_lock, hf_flags);
216 list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
217 list_del(&mbo->list);
218 spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
219 most_free_mbo_coherent(mbo);
220 spin_lock_irqsave(&c->fifo_lock, hf_flags);
222 spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
/* Both lists should now be empty; anything left indicates a race. */
224 if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
225 pr_info("WARN: fifo | trash fifo not empty\n")
229 * flush_trash_fifo - clear the trash fifo
230 * @c: pointer to channel object
232 static int flush_trash_fifo(struct most_c_obj *c)
234 struct mbo *mbo, *tmp;
/* Same unlock-free-relock pattern as flush_channel_fifos(). */
237 spin_lock_irqsave(&c->fifo_lock, flags);
238 list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
239 list_del(&mbo->list);
240 spin_unlock_irqrestore(&c->fifo_lock, flags);
241 most_free_mbo_coherent(mbo);
242 spin_lock_irqsave(&c->fifo_lock, flags);
244 spin_unlock_irqrestore(&c->fifo_lock, flags);
249 * most_channel_release - release function of channel object
250 * @kobj: pointer to channel's kobject
/* kobject release callback; frees the channel (kfree line not visible). */
252 static void most_channel_release(struct kobject *kobj)
254 struct most_c_obj *c = to_c_obj(kobj);
/* Read-only attribute: report the directions (rx/tx) the hardware channel
 * supports, taken from the interface's channel_vector capability table. */
259 static ssize_t show_available_directions(struct most_c_obj *c,
260 struct most_c_attr *attr,
263 unsigned int i = c->channel_id;
266 if (c->iface->channel_vector[i].direction & MOST_CH_RX)
268 if (c->iface->channel_vector[i].direction & MOST_CH_TX)
/* Read-only attribute: report the data types the hardware channel supports.
 * NOTE(review): strcat into buf assumes buf starts empty/NUL-terminated —
 * initialization line not visible in this chunk. */
274 static ssize_t show_available_datatypes(struct most_c_obj *c,
275 struct most_c_attr *attr,
278 unsigned int i = c->channel_id;
281 if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
282 strcat(buf, "control ");
283 if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
284 strcat(buf, "async ");
285 if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
286 strcat(buf, "sync ");
287 if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
288 strcat(buf, "isoc ");
/* The next four attributes expose hardware limits from channel_vector. */
294 ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
295 struct most_c_attr *attr,
298 unsigned int i = c->channel_id;
300 return snprintf(buf, PAGE_SIZE, "%d\n",
301 c->iface->channel_vector[i].num_buffers_packet);
305 ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
306 struct most_c_attr *attr,
309 unsigned int i = c->channel_id;
311 return snprintf(buf, PAGE_SIZE, "%d\n",
312 c->iface->channel_vector[i].num_buffers_streaming);
316 ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
317 struct most_c_attr *attr,
320 unsigned int i = c->channel_id;
322 return snprintf(buf, PAGE_SIZE, "%d\n",
323 c->iface->channel_vector[i].buffer_size_packet);
327 ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
328 struct most_c_attr *attr,
331 unsigned int i = c->channel_id;
333 return snprintf(buf, PAGE_SIZE, "%d\n",
334 c->iface->channel_vector[i].buffer_size_streaming);
/* Diagnostic flag: set when the channel ran out of buffers. */
337 static ssize_t show_channel_starving(struct most_c_obj *c,
338 struct most_c_attr *attr,
341 return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
/* Helper to declare a read-only (0444) channel attribute. */
344 #define create_show_channel_attribute(val) \
345 static MOST_CHNL_ATTR(val, 0444, show_##val, NULL)
347 create_show_channel_attribute(available_directions);
348 create_show_channel_attribute(available_datatypes);
349 create_show_channel_attribute(number_of_packet_buffers);
350 create_show_channel_attribute(number_of_stream_buffers);
351 create_show_channel_attribute(size_of_stream_buffer);
352 create_show_channel_attribute(size_of_packet_buffer);
353 create_show_channel_attribute(channel_starving);
/* Read-write configuration attributes: these edit fields of c->cfg, which
 * is applied to the hardware when the channel is started. */
355 static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
356 struct most_c_attr *attr,
359 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
362 static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
363 struct most_c_attr *attr,
/* kstrtou16 writes directly into the config field on success. */
367 int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
374 static ssize_t show_set_buffer_size(struct most_c_obj *c,
375 struct most_c_attr *attr,
378 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
381 static ssize_t store_set_buffer_size(struct most_c_obj *c,
382 struct most_c_attr *attr,
386 int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
/* Direction reads back as "tx"/"rx"/"unconfigured". */
393 static ssize_t show_set_direction(struct most_c_obj *c,
394 struct most_c_attr *attr,
397 if (c->cfg.direction & MOST_CH_TX)
398 return snprintf(buf, PAGE_SIZE, "tx\n");
399 else if (c->cfg.direction & MOST_CH_RX)
400 return snprintf(buf, PAGE_SIZE, "rx\n");
401 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
/* Accepts both the legacy "dir_rx"/"dir_tx" spellings and plain "rx"/"tx";
 * the sysfs input is matched including its trailing newline. */
404 static ssize_t store_set_direction(struct most_c_obj *c,
405 struct most_c_attr *attr,
409 if (!strcmp(buf, "dir_rx\n")) {
410 c->cfg.direction = MOST_CH_RX;
411 } else if (!strcmp(buf, "rx\n")) {
412 c->cfg.direction = MOST_CH_RX;
413 } else if (!strcmp(buf, "dir_tx\n")) {
414 c->cfg.direction = MOST_CH_TX;
415 } else if (!strcmp(buf, "tx\n")) {
416 c->cfg.direction = MOST_CH_TX;
/* Fall-through branch: unrecognized input is reported and rejected. */
418 pr_info("WARN: invalid attribute settings\n");
/* Data type reads back via the ch_data_type name table. */
424 static ssize_t show_set_datatype(struct most_c_obj *c,
425 struct most_c_attr *attr,
430 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
431 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
432 return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
434 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
437 static ssize_t store_set_datatype(struct most_c_obj *c,
438 struct most_c_attr *attr,
444 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
445 if (!strcmp(buf, ch_data_type[i].name)) {
446 c->cfg.data_type = ch_data_type[i].most_ch_data_type;
/* Loop ran to completion without a match -> invalid input. */
451 if (i == ARRAY_SIZE(ch_data_type)) {
452 pr_info("WARN: invalid attribute settings\n");
458 static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
459 struct most_c_attr *attr,
462 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
465 static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
466 struct most_c_attr *attr,
470 int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
477 static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
478 struct most_c_attr *attr,
481 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
484 static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
485 struct most_c_attr *attr,
489 int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
/* Helper to declare a read-write (0644) channel attribute. */
496 #define create_channel_attribute(value) \
497 static MOST_CHNL_ATTR(value, 0644, show_##value, store_##value)
499 create_channel_attribute(set_buffer_size);
500 create_channel_attribute(set_number_of_buffers);
501 create_channel_attribute(set_direction);
502 create_channel_attribute(set_datatype);
503 create_channel_attribute(set_subbuffer_size);
504 create_channel_attribute(set_packets_per_xact);
507 * most_channel_def_attrs - array of default attributes of channel object
509 static struct attribute *most_channel_def_attrs[] = {
510 &most_chnl_attr_available_directions.attr,
511 &most_chnl_attr_available_datatypes.attr,
512 &most_chnl_attr_number_of_packet_buffers.attr,
513 &most_chnl_attr_number_of_stream_buffers.attr,
514 &most_chnl_attr_size_of_packet_buffer.attr,
515 &most_chnl_attr_size_of_stream_buffer.attr,
516 &most_chnl_attr_set_number_of_buffers.attr,
517 &most_chnl_attr_set_buffer_size.attr,
518 &most_chnl_attr_set_direction.attr,
519 &most_chnl_attr_set_datatype.attr,
520 &most_chnl_attr_set_subbuffer_size.attr,
521 &most_chnl_attr_set_packets_per_xact.attr,
522 &most_chnl_attr_channel_starving.attr,
/* kobject type for channels: wires up sysfs ops, release and defaults. */
526 static struct kobj_type most_channel_ktype = {
527 .sysfs_ops = &most_channel_sysfs_ops,
528 .release = most_channel_release,
529 .default_attrs = most_channel_def_attrs,
532 static struct kset *most_channel_kset;
535 * create_most_c_obj - allocates a channel object
536 * @name: name of the channel object
537 * @parent: parent kobject
539 * This create a channel object and registers it with sysfs.
540 * Returns a pointer to the object or NULL when something went wrong.
542 static struct most_c_obj *
543 create_most_c_obj(const char *name, struct kobject *parent)
545 struct most_c_obj *c;
548 c = kzalloc(sizeof(*c), GFP_KERNEL);
551 c->kobj.kset = most_channel_kset;
552 retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
/* On registration failure the kobject_put() triggers release/kfree. */
555 kobject_put(&c->kobj);
558 kobject_uevent(&c->kobj, KOBJ_ADD);
563 * ___I N S T A N C E___
/* Declare a named instance attribute, analogous to MOST_CHNL_ATTR. */
565 #define MOST_INST_ATTR(_name, _mode, _show, _store) \
566 struct most_inst_attribute most_inst_attr_##_name = \
567 __ATTR(_name, _mode, _show, _store)
/* Global list of registered device instances. */
569 static struct list_head instance_list;
572 * struct most_inst_attribute - to access the attributes of instance object
573 * @attr: attributes of an instance
574 * @show: pointer to the show function
575 * @store: pointer to the store function
577 struct most_inst_attribute {
578 struct attribute attr;
579 ssize_t (*show)(struct most_inst_obj *d,
580 struct most_inst_attribute *attr,
582 ssize_t (*store)(struct most_inst_obj *d,
583 struct most_inst_attribute *attr,
588 #define to_instance_attr(a) \
589 container_of(a, struct most_inst_attribute, attr)
592 * instance_attr_show - show function for an instance object
593 * @kobj: pointer to kobject
594 * @attr: pointer to attribute struct
/* Generic sysfs show dispatcher for instance kobjects. */
597 static ssize_t instance_attr_show(struct kobject *kobj,
598 struct attribute *attr,
601 struct most_inst_attribute *instance_attr;
602 struct most_inst_obj *instance_obj;
604 instance_attr = to_instance_attr(attr);
605 instance_obj = to_inst_obj(kobj);
607 if (!instance_attr->show)
610 return instance_attr->show(instance_obj, instance_attr, buf);
614 * instance_attr_store - store function for an instance object
615 * @kobj: pointer to kobject
616 * @attr: pointer to attribute struct
618 * @len: length of buffer
/* Generic sysfs store dispatcher, mirror of instance_attr_show(). */
620 static ssize_t instance_attr_store(struct kobject *kobj,
621 struct attribute *attr,
625 struct most_inst_attribute *instance_attr;
626 struct most_inst_obj *instance_obj;
628 instance_attr = to_instance_attr(attr);
629 instance_obj = to_inst_obj(kobj);
631 if (!instance_attr->store)
634 return instance_attr->store(instance_obj, instance_attr, buf, len);
637 static const struct sysfs_ops most_inst_sysfs_ops = {
638 .show = instance_attr_show,
639 .store = instance_attr_store,
643 * most_inst_release - release function for instance object
644 * @kobj: pointer to instance's kobject
646 * This frees the allocated memory for the instance object
648 static void most_inst_release(struct kobject *kobj)
650 struct most_inst_obj *inst = to_inst_obj(kobj);
/* Read-only attribute: interface description string from the HDM. */
655 static ssize_t show_description(struct most_inst_obj *instance_obj,
656 struct most_inst_attribute *attr,
659 return snprintf(buf, PAGE_SIZE, "%s\n",
660 instance_obj->iface->description);
/* Read-only attribute: map the interface-type enum to a printable name;
 * unknown values fall through to "unknown" (case labels partially missing
 * in this chunk). */
663 static ssize_t show_interface(struct most_inst_obj *instance_obj,
664 struct most_inst_attribute *attr,
667 switch (instance_obj->iface->interface) {
669 return snprintf(buf, PAGE_SIZE, "loopback\n");
671 return snprintf(buf, PAGE_SIZE, "i2c\n");
673 return snprintf(buf, PAGE_SIZE, "i2s\n");
675 return snprintf(buf, PAGE_SIZE, "tsi\n");
677 return snprintf(buf, PAGE_SIZE, "hbi\n");
678 case ITYPE_MEDIALB_DIM:
679 return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
680 case ITYPE_MEDIALB_DIM2:
681 return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
683 return snprintf(buf, PAGE_SIZE, "usb\n");
685 return snprintf(buf, PAGE_SIZE, "pcie\n");
687 return snprintf(buf, PAGE_SIZE, "unknown\n");
/* Helper to declare a read-only (0444) instance attribute. */
690 #define create_inst_attribute(value) \
691 static MOST_INST_ATTR(value, 0444, show_##value, NULL)
693 create_inst_attribute(description);
694 create_inst_attribute(interface);
696 static struct attribute *most_inst_def_attrs[] = {
697 &most_inst_attr_description.attr,
698 &most_inst_attr_interface.attr,
/* kobject type for device instances. */
702 static struct kobj_type most_inst_ktype = {
703 .sysfs_ops = &most_inst_sysfs_ops,
704 .release = most_inst_release,
705 .default_attrs = most_inst_def_attrs,
708 static struct kset *most_inst_kset;
711 * create_most_inst_obj - creates an instance object
712 * @name: name of the object to be created
714 * This allocates memory for an instance structure, assigns the proper kset
715 * and registers it with sysfs.
717 * Returns a pointer to the instance object or NULL when something went wrong.
719 static struct most_inst_obj *create_most_inst_obj(const char *name)
721 struct most_inst_obj *inst;
724 inst = kzalloc(sizeof(*inst), GFP_KERNEL);
727 inst->kobj.kset = most_inst_kset;
728 retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
/* On registration failure the kobject_put() triggers release/kfree. */
731 kobject_put(&inst->kobj);
734 kobject_uevent(&inst->kobj, KOBJ_ADD);
739 * destroy_most_inst_obj - MOST instance release function
740 * @inst: pointer to the instance object
742 * This decrements the reference counter of the instance object.
743 * If the reference count turns zero, its release function is called
745 static void destroy_most_inst_obj(struct most_inst_obj *inst)
747 struct most_c_obj *c, *tmp;
/* Tear down every channel first, then drop the instance reference. */
749 list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
751 flush_channel_fifos(c);
752 kobject_put(&c->kobj);
754 kobject_put(&inst->kobj);
/* AIM (application-interface-module) kobject wrapper; links a registered
 * AIM driver into the global aim_list. */
760 struct most_aim_obj {
762 struct list_head list;
763 struct most_aim *driver;
766 #define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
768 static struct list_head aim_list;
771 * struct most_aim_attribute - to access the attributes of AIM object
772 * @attr: attributes of an AIM
773 * @show: pointer to the show function
774 * @store: pointer to the store function
776 struct most_aim_attribute {
777 struct attribute attr;
778 ssize_t (*show)(struct most_aim_obj *d,
779 struct most_aim_attribute *attr,
781 ssize_t (*store)(struct most_aim_obj *d,
782 struct most_aim_attribute *attr,
787 #define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
790 * aim_attr_show - show function of an AIM object
791 * @kobj: pointer to kobject
792 * @attr: pointer to attribute struct
/* Generic sysfs show dispatcher for AIM kobjects. */
795 static ssize_t aim_attr_show(struct kobject *kobj,
796 struct attribute *attr,
799 struct most_aim_attribute *aim_attr;
800 struct most_aim_obj *aim_obj;
802 aim_attr = to_aim_attr(attr);
803 aim_obj = to_aim_obj(kobj);
/* NULL ->show check not visible in this chunk but implied by the
 * matching store path below. */
808 return aim_attr->show(aim_obj, aim_attr, buf);
812 * aim_attr_store - store function of an AIM object
813 * @kobj: pointer to kobject
814 * @attr: pointer to attribute struct
816 * @len: length of buffer
818 static ssize_t aim_attr_store(struct kobject *kobj,
819 struct attribute *attr,
823 struct most_aim_attribute *aim_attr;
824 struct most_aim_obj *aim_obj;
826 aim_attr = to_aim_attr(attr);
827 aim_obj = to_aim_obj(kobj);
829 if (!aim_attr->store)
831 return aim_attr->store(aim_obj, aim_attr, buf, len);
834 static const struct sysfs_ops most_aim_sysfs_ops = {
835 .show = aim_attr_show,
836 .store = aim_attr_store,
840 * most_aim_release - AIM release function
841 * @kobj: pointer to AIM's kobject
843 static void most_aim_release(struct kobject *kobj)
845 struct most_aim_obj *aim_obj = to_aim_obj(kobj);
/* add_link (read side): list every "instance:channel" pair currently
 * linked to this AIM, one per iteration, appended into buf. */
850 static ssize_t add_link_show(struct most_aim_obj *aim_obj,
851 struct most_aim_attribute *attr,
854 struct most_c_obj *c;
855 struct most_inst_obj *i;
858 list_for_each_entry(i, &instance_list, list) {
859 list_for_each_entry(c, &i->channel_list, list) {
860 if (c->aim0.ptr == aim_obj->driver ||
861 c->aim1.ptr == aim_obj->driver) {
862 offs += snprintf(buf + offs, PAGE_SIZE - offs,
864 kobject_name(&i->kobj),
865 kobject_name(&c->kobj));
874 * split_string - parses and changes string in the buffer buf and
875 * splits it into two mandatory and one optional substrings.
877 * @buf: complete string from attribute 'add_channel'
878 * @a: address of pointer to 1st substring (=instance name)
879 * @b: address of pointer to 2nd substring (=channel name)
880 * @c: optional address of pointer to 3rd substring (=user defined name)
884 * Input: "mdev0:ch6:my_channel\n" or
885 * "mdev0:ch6:my_channel"
887 * Output: *a -> "mdev0", *b -> "ch6", *c -> "my_channel"
889 * Input: "mdev1:ep81\n"
890 * Output: *a -> "mdev1", *b -> "ep81", *c -> ""
892 * Input: "mdev1:ep81"
893 * Output: *a -> "mdev1", *b -> "ep81", *c == NULL
/* Destructive parse: strsep() writes NULs into buf; caller must pass a
 * writable copy of the sysfs input. */
895 static int split_string(char *buf, char **a, char **b, char **c)
897 *a = strsep(&buf, ":");
/* Second and third tokens also strip a trailing newline via ":\n". */
901 *b = strsep(&buf, ":\n");
906 *c = strsep(&buf, ":\n");
912 * get_channel_by_name - get pointer to channel object
913 * @mdev: name of the device instance
914 * @mdev_ch: name of the respective channel
916 * This retrieves the pointer to a channel object.
919 most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
921 struct most_c_obj *c, *tmp;
922 struct most_inst_obj *i, *i_tmp;
/* Two-stage lookup: first the instance by name, then its channel.
 * 'found' acts as a stage counter (increments not visible here). */
925 list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
926 if (!strcmp(kobject_name(&i->kobj), mdev)) {
931 if (unlikely(!found))
932 return ERR_PTR(-EIO);
934 list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
935 if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
940 if (unlikely(found < 2))
941 return ERR_PTR(-EIO);
946 * store_add_link - store() function for add_link attribute
947 * @aim_obj: pointer to AIM object
948 * @attr: its attributes
950 * @len: buffer length
952 * This parses the string given by buf and splits it into
953 * three substrings. Note: third substring is optional. In case a cdev
954 * AIM is loaded the optional 3rd substring will make up the name of
955 * device node in the /dev directory. If omitted, the device node will
956 * inherit the channel's name within sysfs.
958 * Searches for a pair of device and channel and probes the AIM
961 * (1) echo "mdev0:ch6:my_rxchannel" >add_link
962 * (2) echo "mdev1:ep81" >add_link
964 * (1) would create the device node /dev/my_rxchannel
965 * (2) would create the device node /dev/mdev1-ep81
967 static ssize_t add_link_store(struct most_aim_obj *aim_obj,
968 struct most_aim_attribute *attr,
972 struct most_c_obj *c;
973 struct most_aim **aim_ptr;
974 char buffer[STRING_SIZE];
978 char devnod_buf[STRING_SIZE];
/* Copy the sysfs input into a bounded, writable buffer: split_string()
 * mutates it. len+1 keeps the terminator; capped at STRING_SIZE. */
980 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
982 strlcpy(buffer, buf, max_len);
984 ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
/* No explicit device-node name given: synthesize "<mdev>-<channel>". */
988 if (!mdev_devnod || *mdev_devnod == 0) {
989 snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
991 mdev_devnod = devnod_buf;
994 c = get_channel_by_name(mdev, mdev_ch);
/* Claim the first free AIM slot (aim0, then aim1) on the channel. */
999 aim_ptr = &c->aim0.ptr;
1000 else if (!c->aim1.ptr)
1001 aim_ptr = &c->aim1.ptr;
1005 *aim_ptr = aim_obj->driver;
1006 ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
1007 &c->cfg, &c->kobj, mdev_devnod);
1016 static struct most_aim_attribute most_aim_attr_add_link =
1017 __ATTR_RW(add_link);
1020 * store_remove_link - store function for remove_link attribute
1021 * @aim_obj: pointer to AIM object
1022 * @attr: its attributes
1024 * @len: buffer length
1027 * echo "mdev0:ep81" >remove_link
1029 static ssize_t remove_link_store(struct most_aim_obj *aim_obj,
1030 struct most_aim_attribute *attr,
1034 struct most_c_obj *c;
1035 char buffer[STRING_SIZE];
/* Same bounded-copy-then-parse pattern as add_link_store(); the third
 * (device-node) token is not needed here. */
1039 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
1041 strlcpy(buffer, buf, max_len);
1042 ret = split_string(buffer, &mdev, &mdev_ch, NULL);
1046 c = get_channel_by_name(mdev, mdev_ch);
/* Disconnect the AIM from the HDM channel, then clear whichever slot
 * pointed at this driver (assignments not visible in this chunk). */
1050 if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
1052 if (c->aim0.ptr == aim_obj->driver)
1054 if (c->aim1.ptr == aim_obj->driver)
1059 static struct most_aim_attribute most_aim_attr_remove_link =
1060 __ATTR_WO(remove_link);
1062 static struct attribute *most_aim_def_attrs[] = {
1063 &most_aim_attr_add_link.attr,
1064 &most_aim_attr_remove_link.attr,
/* kobject type for AIM objects. */
1068 static struct kobj_type most_aim_ktype = {
1069 .sysfs_ops = &most_aim_sysfs_ops,
1070 .release = most_aim_release,
1071 .default_attrs = most_aim_def_attrs,
1074 static struct kset *most_aim_kset;
1077 * create_most_aim_obj - creates an AIM object
1078 * @name: name of the AIM
1080 * This creates an AIM object assigns the proper kset and registers
1082 * Returns a pointer to the object or NULL if something went wrong.
1084 static struct most_aim_obj *create_most_aim_obj(const char *name)
1086 struct most_aim_obj *most_aim;
1089 most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
1092 most_aim->kobj.kset = most_aim_kset;
1093 retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
/* On registration failure the kobject_put() triggers release/kfree. */
1096 kobject_put(&most_aim->kobj);
1099 kobject_uevent(&most_aim->kobj, KOBJ_ADD);
1104 * destroy_most_aim_obj - AIM release function
1105 * @p: pointer to AIM object
1107 * This decrements the reference counter of the AIM object. If the
1108 * reference count turns zero, its release function will be called.
1110 static void destroy_most_aim_obj(struct most_aim_obj *p)
1112 kobject_put(&p->kobj);
1120 * Instantiation of the MOST bus
1122 static struct bus_type most_bus = {
1127 * Instantiation of the core driver
1129 static struct device_driver mostcore = {
/* Park an MBO on the channel's trash fifo (freed later by
 * flush_trash_fifo()); used when the channel is poisoned. */
1134 static inline void trash_mbo(struct mbo *mbo)
1136 unsigned long flags;
1137 struct most_c_obj *c = mbo->context;
1139 spin_lock_irqsave(&c->fifo_lock, flags);
1140 list_add(&mbo->list, &c->trash_fifo);
1141 spin_unlock_irqrestore(&c->fifo_lock, flags);
/* True when the enqueue thread has work: not halted and halt_fifo
 * non-empty (return expression not visible in this chunk). */
1144 static bool hdm_mbo_ready(struct most_c_obj *c)
1148 if (c->enqueue_halt)
1151 spin_lock_irq(&c->fifo_lock);
1152 empty = list_empty(&c->halt_fifo);
1153 spin_unlock_irq(&c->fifo_lock);
/* Queue an MBO for the enqueue thread and wake it. */
1158 static void nq_hdm_mbo(struct mbo *mbo)
1160 unsigned long flags;
1161 struct most_c_obj *c = mbo->context;
1163 spin_lock_irqsave(&c->fifo_lock, flags);
1164 list_add_tail(&mbo->list, &c->halt_fifo);
1165 spin_unlock_irqrestore(&c->fifo_lock, flags);
1166 wake_up_interruptible(&c->hdm_fifo_wq);
/* Per-channel kthread: waits for MBOs on halt_fifo and hands them to the
 * HDM's enqueue() under nq_mutex. */
1169 static int hdm_enqueue_thread(void *data)
1171 struct most_c_obj *c = data;
1174 typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
1176 while (likely(!kthread_should_stop())) {
1177 wait_event_interruptible(c->hdm_fifo_wq,
1179 kthread_should_stop());
/* Re-check state under lock; the wakeup condition may be stale. */
1181 mutex_lock(&c->nq_mutex);
1182 spin_lock_irq(&c->fifo_lock);
1183 if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
1184 spin_unlock_irq(&c->fifo_lock);
1185 mutex_unlock(&c->nq_mutex);
1189 mbo = list_pop_mbo(&c->halt_fifo);
1190 spin_unlock_irq(&c->fifo_lock);
/* Rx buffers are always submitted full-sized. */
1192 if (c->cfg.direction == MOST_CH_RX)
1193 mbo->buffer_length = c->cfg.buffer_size;
1195 ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
1196 mutex_unlock(&c->nq_mutex);
1198 if (unlikely(ret)) {
1199 pr_err("hdm enqueue failed\n");
/* Thread exits on enqueue failure; clear the task pointer. */
1201 c->hdm_enqueue_task = NULL;
/* Start the per-channel enqueue kthread; stores the task on success,
 * returns PTR_ERR on kthread_run() failure. */
1209 static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
1211 struct task_struct *task =
1212 kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
1216 return PTR_ERR(task);
1218 c->hdm_enqueue_task = task;
1223 * arm_mbo - recycle MBO for further usage
1224 * @mbo: buffer object
1226 * This puts an MBO back to the list to have it ready for up coming
1229 * In case the MBO belongs to a channel that recently has been
1230 * poisoned, the MBO is scheduled to be trashed.
1231 * Calls the completion handler of an attached AIM.
1233 static void arm_mbo(struct mbo *mbo)
1235 unsigned long flags;
1236 struct most_c_obj *c;
1238 BUG_ON((!mbo) || (!mbo->context));
/* Poisoned channel: divert to trash instead of the ready fifo. */
1241 if (c->is_poisoned) {
/* Return the buffer to the pool and credit the owning AIM's counter. */
1246 spin_lock_irqsave(&c->fifo_lock, flags);
1247 ++*mbo->num_buffers_ptr;
1248 list_add_tail(&mbo->list, &c->fifo);
1249 spin_unlock_irqrestore(&c->fifo_lock, flags);
/* Notify linked AIMs that transmit capacity is available again. */
1251 if (c->aim0.refs && c->aim0.ptr->tx_completion)
1252 c->aim0.ptr->tx_completion(c->iface, c->channel_id);
1254 if (c->aim1.refs && c->aim1.ptr->tx_completion)
1255 c->aim1.ptr->tx_completion(c->iface, c->channel_id);
1259 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
1260 * @c: pointer to interface channel
1261 * @dir: direction of the channel
1262 * @compl: pointer to completion function
1264 * This allocates buffer objects including the containing DMA coherent
1265 * buffer and puts them in the fifo.
1266 * Buffers of Rx channels are put in the kthread fifo, hence immediately
1267 * submitted to the HDM.
1269 * Returns the number of allocated and enqueued MBOs.
1271 static int arm_mbo_chain(struct most_c_obj *c, int dir,
1272 void (*compl)(struct mbo *))
1277 u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
1279 atomic_set(&c->mbo_nq_level, 0);
/* Allocate num_buffers MBOs, each backed by a DMA-coherent buffer. */
1281 for (i = 0; i < c->cfg.num_buffers; i++) {
1282 mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
1288 mbo->ifp = c->iface;
1289 mbo->hdm_channel_id = c->channel_id;
1290 mbo->virt_address = dma_alloc_coherent(NULL,
/* Allocation failure stops the chain; already-armed MBOs remain. */
1294 if (!mbo->virt_address) {
1295 pr_info("WARN: No DMA coherent buffer.\n");
1299 mbo->complete = compl;
1300 mbo->num_buffers_ptr = &dummy_num_buffers;
/* Rx MBOs go straight to the enqueue thread (nq_hdm_mbo path). */
1301 if (dir == MOST_CH_RX) {
1303 atomic_inc(&c->mbo_nq_level);
1317 * most_submit_mbo - submits an MBO to fifo
1318 * @mbo: pointer to the MBO
1320 void most_submit_mbo(struct mbo *mbo)
1322 if (WARN_ONCE(!mbo || !mbo->context,
1323 "bad mbo or missing channel reference\n"))
1328 EXPORT_SYMBOL_GPL(most_submit_mbo);
1331 * most_write_completion - write completion handler
1332 * @mbo: pointer to MBO
1334 * This recycles the MBO for further usage. In case the channel has been
1335 * poisoned, the MBO is scheduled to be trashed.
1337 static void most_write_completion(struct mbo *mbo)
1339 struct most_c_obj *c;
1341 BUG_ON((!mbo) || (!mbo->context));
1344 if (mbo->status == MBO_E_INVAL)
1345 pr_info("WARN: Tx MBO status: invalid\n");
/* Poisoned or closed channel -> trash the buffer; otherwise re-arm it
 * (the trash/arm calls are not visible in this chunk). */
1346 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
1353 * get_channel_by_iface - get pointer to channel object
1354 * @iface: pointer to interface instance
1357 * This retrieves a pointer to a channel of the given interface and channel ID.
1360 most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
1362 struct most_inst_obj *i;
/* Defensive validation: NULL interface or out-of-range channel id. */
1364 if (unlikely(!iface)) {
1365 pr_err("Bad interface\n");
1368 if (unlikely((id < 0) || (id >= iface->num_channels))) {
1369 pr_err("Channel index (%d) out of range\n", id);
1374 pr_err("interface is not registered\n");
1377 return i->channel[id];
/* Report whether the given AIM may still take a buffer from the channel:
 * when two AIMs share a channel, each has its own buffer quota. */
1380 int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
1382 struct most_c_obj *c = get_channel_by_iface(iface, id);
1383 unsigned long flags;
1389 if (c->aim0.refs && c->aim1.refs &&
1390 ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
1391 (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
1394 spin_lock_irqsave(&c->fifo_lock, flags);
1395 empty = list_empty(&c->fifo);
1396 spin_unlock_irqrestore(&c->fifo_lock, flags);
1399 EXPORT_SYMBOL_GPL(channel_has_mbo);
1402 * most_get_mbo - get pointer to an MBO of pool
1403 * @iface: pointer to interface instance
1406 * This attempts to get a free buffer out of the channel fifo.
1407 * Returns a pointer to MBO on success or NULL otherwise.
1409 struct mbo *most_get_mbo(struct most_interface *iface, int id,
1410 struct most_aim *aim)
1413 struct most_c_obj *c;
1414 unsigned long flags;
1415 int *num_buffers_ptr;
1417 c = get_channel_by_iface(iface, id);
/* With two AIMs linked, refuse when the caller's quota is exhausted. */
1421 if (c->aim0.refs && c->aim1.refs &&
1422 ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
1423 (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
/* Select the counter this MBO is charged against; unknown callers use
 * the shared dummy counter. */
1426 if (aim == c->aim0.ptr)
1427 num_buffers_ptr = &c->aim0.num_buffers;
1428 else if (aim == c->aim1.ptr)
1429 num_buffers_ptr = &c->aim1.num_buffers;
1431 num_buffers_ptr = &dummy_num_buffers;
1433 spin_lock_irqsave(&c->fifo_lock, flags);
1434 if (list_empty(&c->fifo)) {
1435 spin_unlock_irqrestore(&c->fifo_lock, flags);
1438 mbo = list_pop_mbo(&c->fifo);
1440 spin_unlock_irqrestore(&c->fifo_lock, flags);
1442 mbo->num_buffers_ptr = num_buffers_ptr;
1443 mbo->buffer_length = c->cfg.buffer_size;
1446 EXPORT_SYMBOL_GPL(most_get_mbo);
1449 * most_put_mbo - return buffer to pool
1450 * @mbo: buffer object
1452 void most_put_mbo(struct mbo *mbo)
1454 struct most_c_obj *c = mbo->context;
/* Tx buffers are re-armed; Rx buffers are requeued toward the HDM and
 * the queue level bumped (intermediate calls not visible here). */
1456 if (c->cfg.direction == MOST_CH_TX) {
1461 atomic_inc(&c->mbo_nq_level);
1463 EXPORT_SYMBOL_GPL(most_put_mbo);
1466 * most_read_completion - read completion handler
1467 * @mbo: pointer to MBO
1469 * This function is called by the HDM when data has been received from the
1470 * hardware and copied to the buffer of the MBO.
1472 * In case the channel has been poisoned it puts the buffer in the trash queue.
1473 * Otherwise, it passes the buffer to an AIM for further processing.
1475 static void most_read_completion(struct mbo *mbo)
1477 struct most_c_obj *c = mbo->context;
1479 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
/* Invalid status: requeue the buffer instead of delivering it. */
1484 if (mbo->status == MBO_E_INVAL) {
1486 atomic_inc(&c->mbo_nq_level);
/* Queue toward the HDM drained -> channel is starving. */
1490 if (atomic_sub_and_test(1, &c->mbo_nq_level))
/* Offer the buffer to aim0 first, then aim1; a 0 return means the AIM
 * consumed it. */
1493 if (c->aim0.refs && c->aim0.ptr->rx_completion &&
1494 c->aim0.ptr->rx_completion(mbo) == 0)
1497 if (c->aim1.refs && c->aim1.ptr->rx_completion &&
1498 c->aim1.ptr->rx_completion(mbo) == 0)
1505 * most_start_channel - prepares a channel for communication
1506 * @iface: pointer to interface instance
1509 * This prepares the channel for usage. Cross-checks whether the
1510 * channel's been properly configured.
1512 * Returns 0 on success or error code otherwise.
1514 int most_start_channel(struct most_interface *iface, int id,
1515 struct most_aim *aim)
1519 struct most_c_obj *c = get_channel_by_iface(iface, id);
1524 mutex_lock(&c->start_mutex);
1525 if (c->aim0.refs + c->aim1.refs > 0)
1526 goto out; /* already started by other aim */
1528 if (!try_module_get(iface->mod)) {
1529 pr_info("failed to acquire HDM lock\n");
1530 mutex_unlock(&c->start_mutex);
1534 c->cfg.extra_len = 0;
1535 if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
1536 pr_info("channel configuration failed. Go check settings...\n");
1541 init_waitqueue_head(&c->hdm_fifo_wq);
1543 if (c->cfg.direction == MOST_CH_RX)
1544 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1545 most_read_completion);
1547 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1548 most_write_completion);
1549 if (unlikely(!num_buffer)) {
1550 pr_info("failed to allocate memory\n");
1555 ret = run_enqueue_thread(c, id);
1560 c->aim0.num_buffers = c->cfg.num_buffers / 2;
1561 c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
1562 atomic_set(&c->mbo_ref, num_buffer);
1565 if (aim == c->aim0.ptr)
1567 if (aim == c->aim1.ptr)
1569 mutex_unlock(&c->start_mutex);
1573 module_put(iface->mod);
1574 mutex_unlock(&c->start_mutex);
1577 EXPORT_SYMBOL_GPL(most_start_channel);
1580 * most_stop_channel - stops a running channel
1581 * @iface: pointer to interface instance
1584 int most_stop_channel(struct most_interface *iface, int id,
1585 struct most_aim *aim)
1587 struct most_c_obj *c;
1589 if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
1590 pr_err("Bad interface or index out of range\n");
1593 c = get_channel_by_iface(iface, id);
1597 mutex_lock(&c->start_mutex);
1598 if (c->aim0.refs + c->aim1.refs >= 2)
1601 if (c->hdm_enqueue_task)
1602 kthread_stop(c->hdm_enqueue_task);
1603 c->hdm_enqueue_task = NULL;
1606 module_put(iface->mod);
1608 c->is_poisoned = true;
1609 if (c->iface->poison_channel(c->iface, c->channel_id)) {
1610 pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
1611 c->iface->description);
1612 mutex_unlock(&c->start_mutex);
1615 flush_trash_fifo(c);
1616 flush_channel_fifos(c);
1618 #ifdef CMPL_INTERRUPTIBLE
1619 if (wait_for_completion_interruptible(&c->cleanup)) {
1620 pr_info("Interrupted while clean up ch %d\n", c->channel_id);
1621 mutex_unlock(&c->start_mutex);
1625 wait_for_completion(&c->cleanup);
1627 c->is_poisoned = false;
1630 if (aim == c->aim0.ptr)
1632 if (aim == c->aim1.ptr)
1634 mutex_unlock(&c->start_mutex);
1637 EXPORT_SYMBOL_GPL(most_stop_channel);
1640 * most_register_aim - registers an AIM (driver) with the core
1641 * @aim: instance of AIM to be registered
1643 int most_register_aim(struct most_aim *aim)
1645 struct most_aim_obj *aim_obj;
1648 pr_err("Bad driver\n");
1651 aim_obj = create_most_aim_obj(aim->name);
1653 pr_info("failed to alloc driver object\n");
1656 aim_obj->driver = aim;
1657 aim->context = aim_obj;
1658 pr_info("registered new application interfacing module %s\n",
1660 list_add_tail(&aim_obj->list, &aim_list);
1663 EXPORT_SYMBOL_GPL(most_register_aim);
1666 * most_deregister_aim - deregisters an AIM (driver) with the core
1667 * @aim: AIM to be removed
1669 int most_deregister_aim(struct most_aim *aim)
1671 struct most_aim_obj *aim_obj;
1672 struct most_c_obj *c, *tmp;
1673 struct most_inst_obj *i, *i_tmp;
1676 pr_err("Bad driver\n");
1680 aim_obj = aim->context;
1682 pr_info("driver not registered.\n");
1685 list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
1686 list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
1687 if (c->aim0.ptr == aim || c->aim1.ptr == aim)
1688 aim->disconnect_channel(
1689 c->iface, c->channel_id);
1690 if (c->aim0.ptr == aim)
1692 if (c->aim1.ptr == aim)
1696 list_del(&aim_obj->list);
1697 destroy_most_aim_obj(aim_obj);
1698 pr_info("deregistering application interfacing module %s\n", aim->name);
1701 EXPORT_SYMBOL_GPL(most_deregister_aim);
1704 * most_register_interface - registers an interface with core
1705 * @iface: pointer to the instance of the interface description.
1707 * Allocates and initializes a new interface instance and all of its channels.
1708 * Returns a pointer to kobject or an error pointer.
1710 struct kobject *most_register_interface(struct most_interface *iface)
1714 char name[STRING_SIZE];
1715 char channel_name[STRING_SIZE];
1716 struct most_c_obj *c;
1717 struct most_inst_obj *inst;
1719 if (!iface || !iface->enqueue || !iface->configure ||
1720 !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
1721 pr_err("Bad interface or channel overflow\n");
1722 return ERR_PTR(-EINVAL);
1725 id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
1727 pr_info("Failed to alloc mdev ID\n");
1730 snprintf(name, STRING_SIZE, "mdev%d", id);
1732 inst = create_most_inst_obj(name);
1734 pr_info("Failed to allocate interface instance\n");
1735 ida_simple_remove(&mdev_id, id);
1736 return ERR_PTR(-ENOMEM);
1740 INIT_LIST_HEAD(&inst->channel_list);
1741 inst->iface = iface;
1743 list_add_tail(&inst->list, &instance_list);
1745 for (i = 0; i < iface->num_channels; i++) {
1746 const char *name_suffix = iface->channel_vector[i].name_suffix;
1749 snprintf(channel_name, STRING_SIZE, "ch%d", i);
1751 snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
1753 /* this increments the reference count of this instance */
1754 c = create_most_c_obj(channel_name, &inst->kobj);
1757 inst->channel[i] = c;
1762 c->keep_mbo = false;
1763 c->enqueue_halt = false;
1764 c->is_poisoned = false;
1765 c->cfg.direction = 0;
1766 c->cfg.data_type = 0;
1767 c->cfg.num_buffers = 0;
1768 c->cfg.buffer_size = 0;
1769 c->cfg.subbuffer_size = 0;
1770 c->cfg.packets_per_xact = 0;
1771 spin_lock_init(&c->fifo_lock);
1772 INIT_LIST_HEAD(&c->fifo);
1773 INIT_LIST_HEAD(&c->trash_fifo);
1774 INIT_LIST_HEAD(&c->halt_fifo);
1775 init_completion(&c->cleanup);
1776 atomic_set(&c->mbo_ref, 0);
1777 mutex_init(&c->start_mutex);
1778 mutex_init(&c->nq_mutex);
1779 list_add_tail(&c->list, &inst->channel_list);
1781 pr_info("registered new MOST device mdev%d (%s)\n",
1782 inst->dev_id, iface->description);
1786 pr_info("Failed allocate channel(s)\n");
1787 list_del(&inst->list);
1788 ida_simple_remove(&mdev_id, id);
1789 destroy_most_inst_obj(inst);
1790 return ERR_PTR(-ENOMEM);
1792 EXPORT_SYMBOL_GPL(most_register_interface);
1795 * most_deregister_interface - deregisters an interface with core
1796 * @iface: pointer to the interface instance description.
1798 * Before removing an interface instance from the list, all running
1799 * channels are stopped and poisoned.
1801 void most_deregister_interface(struct most_interface *iface)
1803 struct most_inst_obj *i = iface->priv;
1804 struct most_c_obj *c;
1807 pr_info("Bad Interface\n");
1810 pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
1811 iface->description);
1813 list_for_each_entry(c, &i->channel_list, list) {
1815 c->aim0.ptr->disconnect_channel(c->iface,
1818 c->aim1.ptr->disconnect_channel(c->iface,
1824 ida_simple_remove(&mdev_id, i->dev_id);
1826 destroy_most_inst_obj(i);
1828 EXPORT_SYMBOL_GPL(most_deregister_interface);
1831 * most_stop_enqueue - prevents core from enqueueing MBOs
1832 * @iface: pointer to interface
1835 * This is called by an HDM that _cannot_ attend to its duties and
1836 * is imminent to get run over by the core. The core is not going to
1837 * enqueue any further packets unless the flagging HDM calls
1838 * most_resume enqueue().
1840 void most_stop_enqueue(struct most_interface *iface, int id)
1842 struct most_c_obj *c = get_channel_by_iface(iface, id);
1847 mutex_lock(&c->nq_mutex);
1848 c->enqueue_halt = true;
1849 mutex_unlock(&c->nq_mutex);
1851 EXPORT_SYMBOL_GPL(most_stop_enqueue);
1854 * most_resume_enqueue - allow core to enqueue MBOs again
1855 * @iface: pointer to interface
1858 * This clears the enqueue halt flag and enqueues all MBOs currently
1859 * sitting in the wait fifo.
1861 void most_resume_enqueue(struct most_interface *iface, int id)
1863 struct most_c_obj *c = get_channel_by_iface(iface, id);
1868 mutex_lock(&c->nq_mutex);
1869 c->enqueue_halt = false;
1870 mutex_unlock(&c->nq_mutex);
1872 wake_up_interruptible(&c->hdm_fifo_wq);
1874 EXPORT_SYMBOL_GPL(most_resume_enqueue);
1876 static int __init most_init(void)
1880 pr_info("init()\n");
1881 INIT_LIST_HEAD(&instance_list);
1882 INIT_LIST_HEAD(&aim_list);
1885 err = bus_register(&most_bus);
1887 pr_info("Cannot register most bus\n");
1891 most_class = class_create(THIS_MODULE, "most");
1892 if (IS_ERR(most_class)) {
1893 pr_info("No udev support.\n");
1894 err = PTR_ERR(most_class);
1898 err = driver_register(&mostcore);
1900 pr_info("Cannot register core driver\n");
1904 core_dev = device_create(most_class, NULL, 0, NULL, "mostcore");
1905 if (IS_ERR(core_dev)) {
1906 err = PTR_ERR(core_dev);
1910 most_aim_kset = kset_create_and_add("aims", NULL, &core_dev->kobj);
1911 if (!most_aim_kset) {
1913 goto exit_class_container;
1916 most_inst_kset = kset_create_and_add("devices", NULL, &core_dev->kobj);
1917 if (!most_inst_kset) {
1919 goto exit_driver_kset;
1925 kset_unregister(most_aim_kset);
1926 exit_class_container:
1927 device_destroy(most_class, 0);
1929 driver_unregister(&mostcore);
1931 class_destroy(most_class);
1933 bus_unregister(&most_bus);
1937 static void __exit most_exit(void)
1939 struct most_inst_obj *i, *i_tmp;
1940 struct most_aim_obj *d, *d_tmp;
1942 pr_info("exit core module\n");
1943 list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
1944 destroy_most_aim_obj(d);
1947 list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
1949 destroy_most_inst_obj(i);
1951 kset_unregister(most_inst_kset);
1952 kset_unregister(most_aim_kset);
1953 device_destroy(most_class, 0);
1954 driver_unregister(&mostcore);
1955 class_destroy(most_class);
1956 bus_unregister(&most_bus);
1957 ida_destroy(&mdev_id);
1960 module_init(most_init);
1961 module_exit(most_exit);
1962 MODULE_LICENSE("GPL");
1963 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1964 MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");