2 * core.c - Implementation of core module of MOST Linux driver stack
4 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * This file is licensed under GPLv2.
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/device.h>
20 #include <linux/list.h>
21 #include <linux/poll.h>
22 #include <linux/wait.h>
23 #include <linux/kobject.h>
24 #include <linux/mutex.h>
25 #include <linux/completion.h>
26 #include <linux/sysfs.h>
27 #include <linux/kthread.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/idr.h>
32 #define MAX_CHANNELS 64
33 #define STRING_SIZE 80
35 static struct class *most_class;
36 static struct device *core_dev;
37 static struct ida mdev_id;
38 static int dummy_num_buffers;
/*
 * struct most_c_aim_obj - per-AIM state embedded in a channel object.
 * NOTE(review): this excerpt is missing the members of most_c_aim_obj,
 * its closing brace, and the head of struct most_c_obj that the fields
 * below belong to -- confirm against the full source.
 */
struct most_c_aim_obj {
/* --- fields of struct most_c_obj --- */
struct completion cleanup;	/* completed when the last MBO ref drops (see most_free_mbo_coherent) */
atomic_t mbo_nq_level;		/* count of MBOs currently queued towards the HDM */
struct mutex start_mutex;	/* serializes most_start_channel()/stop */
struct mutex nq_mutex; /* nq thread synchronization */
struct most_interface *iface;	/* HDM interface this channel belongs to */
struct most_inst_obj *inst;	/* owning instance object */
struct most_channel_config cfg;	/* user-configured channel settings */
struct list_head fifo;		/* pool of ready-to-use MBOs */
struct list_head halt_fifo;	/* MBOs waiting to be enqueued to the HDM */
struct list_head list;		/* link in the instance's channel_list */
struct most_c_aim_obj aim0;	/* first attached AIM slot */
struct most_c_aim_obj aim1;	/* second attached AIM slot */
struct list_head trash_fifo;	/* MBOs scheduled for release */
struct task_struct *hdm_enqueue_task;	/* kthread feeding halt_fifo to the HDM */
wait_queue_head_t hdm_fifo_wq;	/* wakes hdm_enqueue_task when work arrives */
72 #define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
/* struct most_inst_obj - representation of one registered MOST interface.
 * NOTE(review): some members (e.g. the kobject) and the closing brace are
 * missing from this excerpt. */
struct most_inst_obj {
	struct most_interface *iface;	/* the registered HDM interface */
	struct list_head channel_list;	/* all channels of this interface */
	struct most_c_obj *channel[MAX_CHANNELS];	/* channel-id -> object lookup */
	struct list_head list;		/* link in the global instance_list */
/* maps each channel data-type flag to its sysfs textual name.
 * NOTE(review): the struct/array head lines are missing from this excerpt. */
int most_ch_data_type;
	{ MOST_CH_CONTROL, "control\n" },
	{ MOST_CH_ASYNC, "async\n" },
	{ MOST_CH_SYNC, "sync\n" },
	{ MOST_CH_ISOC, "isoc\n"},
	{ MOST_CH_ISOC, "isoc_avp\n"},	/* second alias for MOST_CH_ISOC */
94 #define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 *
 * Caller must hold the fifo lock protecting @ptr.
 * NOTE(review): the statement-expression wrapper lines appear to be
 * missing from this excerpt.
 */
#define list_pop_mbo(ptr) \
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list); \
	list_del(&_mbo->list); \
108 * ___C H A N N E L___
/**
 * struct most_c_attr - to access the attributes of a channel object
 * @attr: attributes of a channel
 * @show: pointer to the show function
 * @store: pointer to the store function
 *
 * NOTE(review): the struct head line and parameter tails are missing
 * from this excerpt.
 */
	struct attribute attr;
	ssize_t (*show)(struct most_c_obj *d,
			struct most_c_attr *attr,
	ssize_t (*store)(struct most_c_obj *d,
			 struct most_c_attr *attr,

/* convert an embedded struct attribute back to its most_c_attr */
#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
/**
 * channel_attr_show - show function of channel object
 * @kobj: pointer to its kobject
 * @attr: pointer to its attributes
 *
 * Dispatches to the attribute's own ->show handler when one is set.
 * NOTE(review): the parameter tail, braces and the error return for a
 * NULL ->show are missing from this excerpt.
 */
static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
	struct most_c_attr *channel_attr = to_channel_attr(attr);
	struct most_c_obj *c_obj = to_c_obj(kobj);

	if (!channel_attr->show)
	return channel_attr->show(c_obj, channel_attr, buf);
/**
 * channel_attr_store - store function of channel object
 * @kobj: pointer to its kobject
 * @attr: pointer to its attributes
 * @len: length of buffer
 *
 * Dispatches to the attribute's own ->store handler when one is set.
 */
static ssize_t channel_attr_store(struct kobject *kobj,
				  struct attribute *attr,
	struct most_c_attr *channel_attr = to_channel_attr(attr);
	struct most_c_obj *c_obj = to_c_obj(kobj);

	if (!channel_attr->store)
	return channel_attr->store(c_obj, channel_attr, buf, len);

/* route generic kobject sysfs calls to the typed channel handlers above */
static const struct sysfs_ops most_channel_sysfs_ops = {
	.show = channel_attr_show,
	.store = channel_attr_store,
/**
 * most_free_mbo_coherent - free an MBO and its coherent buffer
 * @mbo: buffer to be released
 *
 * Releases the DMA-coherent payload buffer and completes the channel's
 * cleanup completion once the last outstanding MBO reference is dropped.
 */
static void most_free_mbo_coherent(struct mbo *mbo)
	struct most_c_obj *c = mbo->context;
	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	/*
	 * FIXME: passing a NULL device to dma_free_coherent() is invalid on
	 * most architectures; the owning HDM's device should be used here.
	 */
	dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,

	/* last reference gone: unblock whoever waits in channel teardown */
	if (atomic_sub_and_test(1, &c->mbo_ref))
		complete(&c->cleanup);
/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 *
 * Frees every MBO on both the fifo and the halt_fifo.  The spinlock is
 * dropped around each most_free_mbo_coherent() call -- presumably because
 * freeing the coherent buffer must not happen in atomic context (TODO
 * confirm); list_for_each_entry_safe keeps the walk valid across the
 * unlock/relock window.
 */
static void flush_channel_fifos(struct most_c_obj *c)
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	/* nothing queued anywhere: nothing to do */
	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	/* sanity check: both lists should now be empty */
	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
		pr_info("WARN: fifo | trash fifo not empty\n");
/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 *
 * Frees every MBO that was parked on the trash fifo, dropping the fifo
 * lock around each free.
 */
static int flush_trash_fifo(struct most_c_obj *c)
	struct mbo *mbo, *tmp;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		/* drop the lock while freeing the MBO */
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
/**
 * most_channel_release - release function of channel object
 * @kobj: pointer to channel's kobject
 *
 * Called by the kobject core when the channel's refcount reaches zero.
 */
static void most_channel_release(struct kobject *kobj)
	struct most_c_obj *c = to_c_obj(kobj);
/* available_directions_show - list the directions the HDM supports for
 * this channel, derived from iface->channel_vector[]. */
static ssize_t available_directions_show(struct most_c_obj *c,
					 struct most_c_attr *attr,
	unsigned int i = c->channel_id;

	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
	if (c->iface->channel_vector[i].direction & MOST_CH_TX)

/* available_datatypes_show - list the data types the HDM supports for
 * this channel. */
static ssize_t available_datatypes_show(struct most_c_obj *c,
					struct most_c_attr *attr,
	unsigned int i = c->channel_id;

	/* NOTE(review): strcat() relies on buf being NUL-terminated by a
	 * line missing from this excerpt (e.g. strcpy(buf, "")) -- confirm. */
	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
		strcat(buf, "control ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
		strcat(buf, "async ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
		strcat(buf, "sync ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
		strcat(buf, "isoc ");
/* The following read-only attributes expose the static per-channel
 * capabilities reported by the HDM in iface->channel_vector[]. */
static ssize_t number_of_packet_buffers_show(struct most_c_obj *c,
					     struct most_c_attr *attr,
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_packet);

static ssize_t number_of_stream_buffers_show(struct most_c_obj *c,
					     struct most_c_attr *attr,
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_streaming);

static ssize_t size_of_packet_buffer_show(struct most_c_obj *c,
					  struct most_c_attr *attr,
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_packet);

static ssize_t size_of_stream_buffer_show(struct most_c_obj *c,
					  struct most_c_attr *attr,
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_streaming);

/* channel_starving_show - report the channel's is_starving flag */
static ssize_t channel_starving_show(struct most_c_obj *c,
				     struct most_c_attr *attr,
	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
/* Read/write attributes mirroring fields of the channel configuration.
 * The store handlers parse with kstrtou16() directly into c->cfg; the
 * error-return lines are missing from this excerpt. */
static ssize_t set_number_of_buffers_show(struct most_c_obj *c,
					  struct most_c_attr *attr,
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);

static ssize_t set_number_of_buffers_store(struct most_c_obj *c,
					   struct most_c_attr *attr,
	int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);

static ssize_t set_buffer_size_show(struct most_c_obj *c,
				    struct most_c_attr *attr,
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);

static ssize_t set_buffer_size_store(struct most_c_obj *c,
				     struct most_c_attr *attr,
	int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);

static ssize_t set_direction_show(struct most_c_obj *c,
				  struct most_c_attr *attr,
	if (c->cfg.direction & MOST_CH_TX)
		return snprintf(buf, PAGE_SIZE, "tx\n");
	else if (c->cfg.direction & MOST_CH_RX)
		return snprintf(buf, PAGE_SIZE, "rx\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");

/* accepts "rx"/"tx" as well as the "dir_rx"/"dir_tx" spellings */
static ssize_t set_direction_store(struct most_c_obj *c,
				   struct most_c_attr *attr,
	if (!strcmp(buf, "dir_rx\n")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "rx\n")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "dir_tx\n")) {
		c->cfg.direction = MOST_CH_TX;
	} else if (!strcmp(buf, "tx\n")) {
		c->cfg.direction = MOST_CH_TX;
	/* fallthrough branch for any other input */
	pr_info("WARN: invalid attribute settings\n");
405 static ssize_t set_datatype_show(struct most_c_obj *c,
406 struct most_c_attr *attr,
411 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
412 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
413 return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
415 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
/* set_datatype_store - match the written string against the data-type
 * name table and store the corresponding flag in c->cfg. */
static ssize_t set_datatype_store(struct most_c_obj *c,
				  struct most_c_attr *attr,
	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (!strcmp(buf, ch_data_type[i].name)) {
			c->cfg.data_type = ch_data_type[i].most_ch_data_type;

	/* loop ran to the end: no table entry matched the input */
	if (i == ARRAY_SIZE(ch_data_type)) {
		pr_info("WARN: invalid attribute settings\n");

static ssize_t set_subbuffer_size_show(struct most_c_obj *c,
				       struct most_c_attr *attr,
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);

static ssize_t set_subbuffer_size_store(struct most_c_obj *c,
					struct most_c_attr *attr,
	int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);

static ssize_t set_packets_per_xact_show(struct most_c_obj *c,
					 struct most_c_attr *attr,
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);

static ssize_t set_packets_per_xact_store(struct most_c_obj *c,
					  struct most_c_attr *attr,
	int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
/* table of all channel attributes; the index references below must
 * match this declaration order */
static struct most_c_attr most_c_attrs[] = {
	__ATTR_RO(available_directions),
	__ATTR_RO(available_datatypes),
	__ATTR_RO(number_of_packet_buffers),
	__ATTR_RO(number_of_stream_buffers),
	__ATTR_RO(size_of_stream_buffer),
	__ATTR_RO(size_of_packet_buffer),
	__ATTR_RO(channel_starving),
	__ATTR_RW(set_buffer_size),
	__ATTR_RW(set_number_of_buffers),
	__ATTR_RW(set_direction),
	__ATTR_RW(set_datatype),
	__ATTR_RW(set_subbuffer_size),
	__ATTR_RW(set_packets_per_xact),

/*
 * most_channel_def_attrs - array of default attributes of channel object
 */
static struct attribute *most_channel_def_attrs[] = {
	&most_c_attrs[0].attr,
	&most_c_attrs[1].attr,
	&most_c_attrs[2].attr,
	&most_c_attrs[3].attr,
	&most_c_attrs[4].attr,
	&most_c_attrs[5].attr,
	&most_c_attrs[6].attr,
	&most_c_attrs[7].attr,
	&most_c_attrs[8].attr,
	&most_c_attrs[9].attr,
	&most_c_attrs[10].attr,
	&most_c_attrs[11].attr,
	&most_c_attrs[12].attr,
/* kobject type tying channel sysfs ops, release and default attributes */
static struct kobj_type most_channel_ktype = {
	.sysfs_ops = &most_channel_sysfs_ops,
	.release = most_channel_release,
	.default_attrs = most_channel_def_attrs,

static struct kset *most_channel_kset;

/**
 * create_most_c_obj - allocates a channel object
 * @name: name of the channel object
 * @parent: parent kobject
 *
 * This create a channel object and registers it with sysfs.
 * Returns a pointer to the object or NULL when something went wrong.
 */
static struct most_c_obj *
create_most_c_obj(const char *name, struct kobject *parent)
	struct most_c_obj *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);

	c->kobj.kset = most_channel_kset;
	retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
		/* registration failed: drop the reference we just took */
		kobject_put(&c->kobj);
	kobject_uevent(&c->kobj, KOBJ_ADD);
550 * ___I N S T A N C E___
/* global list of all registered interface instances */
static struct list_head instance_list;

/**
 * struct most_inst_attribute - to access the attributes of instance object
 * @attr: attributes of an instance
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_inst_attribute {
	struct attribute attr;
	ssize_t (*show)(struct most_inst_obj *d,
			struct most_inst_attribute *attr,
	ssize_t (*store)(struct most_inst_obj *d,
			 struct most_inst_attribute *attr,

/* convert an embedded struct attribute back to its most_inst_attribute */
#define to_instance_attr(a) \
	container_of(a, struct most_inst_attribute, attr)
/**
 * instance_attr_show - show function for an instance object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 *
 * Dispatches to the attribute's own ->show handler when one is set.
 */
static ssize_t instance_attr_show(struct kobject *kobj,
				  struct attribute *attr,
	struct most_inst_attribute *instance_attr;
	struct most_inst_obj *instance_obj;

	instance_attr = to_instance_attr(attr);
	instance_obj = to_inst_obj(kobj);

	if (!instance_attr->show)

	return instance_attr->show(instance_obj, instance_attr, buf);

/**
 * instance_attr_store - store function for an instance object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @len: length of buffer
 */
static ssize_t instance_attr_store(struct kobject *kobj,
				   struct attribute *attr,
	struct most_inst_attribute *instance_attr;
	struct most_inst_obj *instance_obj;

	instance_attr = to_instance_attr(attr);
	instance_obj = to_inst_obj(kobj);

	if (!instance_attr->store)

	return instance_attr->store(instance_obj, instance_attr, buf, len);

/* route generic kobject sysfs calls to the typed instance handlers above */
static const struct sysfs_ops most_inst_sysfs_ops = {
	.show = instance_attr_show,
	.store = instance_attr_store,
/**
 * most_inst_release - release function for instance object
 * @kobj: pointer to instance's kobject
 *
 * This frees the allocated memory for the instance object
 */
static void most_inst_release(struct kobject *kobj)
	struct most_inst_obj *inst = to_inst_obj(kobj);

/* description_show - expose the interface's textual description */
static ssize_t description_show(struct most_inst_obj *instance_obj,
				struct most_inst_attribute *attr,
	return snprintf(buf, PAGE_SIZE, "%s\n",
			instance_obj->iface->description);
/* interface_show - print a human-readable name for the interface type.
 * NOTE(review): several case labels are missing from this excerpt; the
 * trailing snprintf() handles unrecognized types. */
static ssize_t interface_show(struct most_inst_obj *instance_obj,
			      struct most_inst_attribute *attr,
	switch (instance_obj->iface->interface) {
		return snprintf(buf, PAGE_SIZE, "loopback\n");
		return snprintf(buf, PAGE_SIZE, "i2c\n");
		return snprintf(buf, PAGE_SIZE, "i2s\n");
		return snprintf(buf, PAGE_SIZE, "tsi\n");
		return snprintf(buf, PAGE_SIZE, "hbi\n");
	case ITYPE_MEDIALB_DIM:
		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
	case ITYPE_MEDIALB_DIM2:
		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
		return snprintf(buf, PAGE_SIZE, "usb\n");
		return snprintf(buf, PAGE_SIZE, "pcie\n");
	return snprintf(buf, PAGE_SIZE, "unknown\n");
static struct most_inst_attribute most_inst_attr_description =
	__ATTR_RO(description);

static struct most_inst_attribute most_inst_attr_interface =
	__ATTR_RO(interface);

/* default attributes created for every registered instance */
static struct attribute *most_inst_def_attrs[] = {
	&most_inst_attr_description.attr,
	&most_inst_attr_interface.attr,

static struct kobj_type most_inst_ktype = {
	.sysfs_ops = &most_inst_sysfs_ops,
	.release = most_inst_release,
	.default_attrs = most_inst_def_attrs,

static struct kset *most_inst_kset;
/**
 * create_most_inst_obj - creates an instance object
 * @name: name of the object to be created
 *
 * This allocates memory for an instance structure, assigns the proper kset
 * and registers it with sysfs.
 *
 * Returns a pointer to the instance object or NULL when something went wrong.
 */
static struct most_inst_obj *create_most_inst_obj(const char *name)
	struct most_inst_obj *inst;

	inst = kzalloc(sizeof(*inst), GFP_KERNEL);

	inst->kobj.kset = most_inst_kset;
	retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
		/* registration failed: drop the reference we just took */
		kobject_put(&inst->kobj);
	kobject_uevent(&inst->kobj, KOBJ_ADD);
/**
 * destroy_most_inst_obj - MOST instance release function
 * @inst: pointer to the instance object
 *
 * This decrements the reference counter of the instance object.
 * If the reference count turns zero, its release function is called
 */
static void destroy_most_inst_obj(struct most_inst_obj *inst)
	struct most_c_obj *c, *tmp;

	/* tear down every channel before dropping the instance itself */
	list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
		flush_channel_fifos(c);
		kobject_put(&c->kobj);
	kobject_put(&inst->kobj);
/* struct most_aim_obj - sysfs representation of a registered AIM */
struct most_aim_obj {
	struct list_head list;		/* link in the global aim_list */
	struct most_aim *driver;	/* the AIM driver's callback table */

/* convert an embedded kobject back to its most_aim_obj */
#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)

static struct list_head aim_list;

/**
 * struct most_aim_attribute - to access the attributes of AIM object
 * @attr: attributes of an AIM
 * @show: pointer to the show function
 * @store: pointer to the store function
 */
struct most_aim_attribute {
	struct attribute attr;
	ssize_t (*show)(struct most_aim_obj *d,
			struct most_aim_attribute *attr,
	ssize_t (*store)(struct most_aim_obj *d,
			 struct most_aim_attribute *attr,

/* convert an embedded struct attribute back to its most_aim_attribute */
#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
/**
 * aim_attr_show - show function of an AIM object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 *
 * Dispatches to the attribute's own ->show handler when one is set.
 */
static ssize_t aim_attr_show(struct kobject *kobj,
			     struct attribute *attr,
	struct most_aim_attribute *aim_attr;
	struct most_aim_obj *aim_obj;

	aim_attr = to_aim_attr(attr);
	aim_obj = to_aim_obj(kobj);

	return aim_attr->show(aim_obj, aim_attr, buf);

/**
 * aim_attr_store - store function of an AIM object
 * @kobj: pointer to kobject
 * @attr: pointer to attribute struct
 * @len: length of buffer
 */
static ssize_t aim_attr_store(struct kobject *kobj,
			      struct attribute *attr,
	struct most_aim_attribute *aim_attr;
	struct most_aim_obj *aim_obj;

	aim_attr = to_aim_attr(attr);
	aim_obj = to_aim_obj(kobj);

	if (!aim_attr->store)
	return aim_attr->store(aim_obj, aim_attr, buf, len);

/* route generic kobject sysfs calls to the typed AIM handlers above */
static const struct sysfs_ops most_aim_sysfs_ops = {
	.show = aim_attr_show,
	.store = aim_attr_store,
/**
 * most_aim_release - AIM release function
 * @kobj: pointer to AIM's kobject
 */
static void most_aim_release(struct kobject *kobj)
	struct most_aim_obj *aim_obj = to_aim_obj(kobj);

/* links_show - list every instance:channel pair this AIM is linked to.
 * NOTE(review): the snprintf() format-string line is missing from this
 * excerpt. */
static ssize_t links_show(struct most_aim_obj *aim_obj,
			  struct most_aim_attribute *attr,
	struct most_c_obj *c;
	struct most_inst_obj *i;

	list_for_each_entry(i, &instance_list, list) {
		list_for_each_entry(c, &i->channel_list, list) {
			if (c->aim0.ptr == aim_obj->driver ||
			    c->aim1.ptr == aim_obj->driver) {
				offs += snprintf(buf + offs, PAGE_SIZE - offs,
						 kobject_name(&i->kobj),
						 kobject_name(&c->kobj));
/**
 * split_string - parses and changes string in the buffer buf and
 * splits it into two mandatory and one optional substrings.
 * @buf: complete string from attribute 'add_channel'
 * @a: address of pointer to 1st substring (=instance name)
 * @b: address of pointer to 2nd substring (=channel name)
 * @c: optional address of pointer to 3rd substring (=user defined name)
 *
 * Examples:
 * Input: "mdev0:ch6:my_channel\n" or
 *        "mdev0:ch6:my_channel"
 * Output: *a -> "mdev0", *b -> "ch6", *c -> "my_channel"
 *
 * Input: "mdev1:ep81\n"
 * Output: *a -> "mdev1", *b -> "ep81", *c -> ""
 *
 * Input: "mdev1:ep81"
 * Output: *a -> "mdev1", *b -> "ep81", *c == NULL
 */
static int split_string(char *buf, char **a, char **b, char **c)
	*a = strsep(&buf, ":");

	*b = strsep(&buf, ":\n");

	/* third token is optional; ":\n" also strips a trailing newline */
	*c = strsep(&buf, ":\n");
/**
 * get_channel_by_name - get pointer to channel object
 * @mdev: name of the device instance
 * @mdev_ch: name of the respective channel
 *
 * Looks up the instance by name, then the channel within it.
 * Returns ERR_PTR(-EIO) when either lookup fails.
 */
most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
	struct most_c_obj *c, *tmp;
	struct most_inst_obj *i, *i_tmp;

	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
		if (!strcmp(kobject_name(&i->kobj), mdev)) {
	if (unlikely(!found))
		return ERR_PTR(-EIO);

	list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
		if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
	/* presumably 'found' counts both successful matches -- the
	 * increment lines are missing from this excerpt */
	if (unlikely(found < 2))
		return ERR_PTR(-EIO);
/**
 * add_link_store - store() function for add_link attribute
 * @aim_obj: pointer to AIM object
 * @attr: its attributes
 * @len: buffer length
 *
 * This parses the string given by buf and splits it into
 * three substrings. Note: third substring is optional. In case a cdev
 * AIM is loaded the optional 3rd substring will make up the name of
 * device node in the /dev directory. If omitted, the device node will
 * inherit the channel's name within sysfs.
 *
 * Searches for a pair of device and channel and probes the AIM
 *
 * Example:
 * (1) echo "mdev0:ch6:my_rxchannel" >add_link
 * (2) echo "mdev1:ep81" >add_link
 *
 * (1) would create the device node /dev/my_rxchannel
 * (2) would create the device node /dev/mdev1-ep81
 */
static ssize_t add_link_store(struct most_aim_obj *aim_obj,
			      struct most_aim_attribute *attr,
	struct most_c_obj *c;
	struct most_aim **aim_ptr;
	char buffer[STRING_SIZE];
	char devnod_buf[STRING_SIZE];
	/* bound the copy to the local buffer; +1 keeps room for the NUL */
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);

	ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);

	/* no device-node name given: default to "<instance>-<channel>" */
	if (!mdev_devnod || *mdev_devnod == 0) {
		snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
		mdev_devnod = devnod_buf;

	c = get_channel_by_name(mdev, mdev_ch);

	/* attach to the first free AIM slot (at most two per channel) */
		aim_ptr = &c->aim0.ptr;
	else if (!c->aim1.ptr)
		aim_ptr = &c->aim1.ptr;

	*aim_ptr = aim_obj->driver;
	ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
					     &c->cfg, &c->kobj, mdev_devnod);
/**
 * remove_link_store - store function for remove_link attribute
 * @aim_obj: pointer to AIM object
 * @attr: its attributes
 * @len: buffer length
 *
 * Example:
 * echo "mdev0:ep81" >remove_link
 */
static ssize_t remove_link_store(struct most_aim_obj *aim_obj,
				 struct most_aim_attribute *attr,
	struct most_c_obj *c;
	char buffer[STRING_SIZE];
	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);

	strlcpy(buffer, buf, max_len);
	ret = split_string(buffer, &mdev, &mdev_ch, NULL);

	c = get_channel_by_name(mdev, mdev_ch);

	if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
	/* clear whichever AIM slot pointed at this driver */
	if (c->aim0.ptr == aim_obj->driver)
	if (c->aim1.ptr == aim_obj->driver)
static struct most_aim_attribute most_aim_attrs[] = {
	__ATTR_WO(add_link),
	__ATTR_WO(remove_link),

/* NOTE(review): three entries are referenced below but only two are
 * visible above; one attribute line appears missing from this excerpt. */
static struct attribute *most_aim_def_attrs[] = {
	&most_aim_attrs[0].attr,
	&most_aim_attrs[1].attr,
	&most_aim_attrs[2].attr,

static struct kobj_type most_aim_ktype = {
	.sysfs_ops = &most_aim_sysfs_ops,
	.release = most_aim_release,
	.default_attrs = most_aim_def_attrs,

static struct kset *most_aim_kset;
/**
 * create_most_aim_obj - creates an AIM object
 * @name: name of the AIM
 *
 * This creates an AIM object assigns the proper kset and registers
 * it with sysfs.
 * Returns a pointer to the object or NULL if something went wrong.
 */
static struct most_aim_obj *create_most_aim_obj(const char *name)
	struct most_aim_obj *most_aim;

	most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);

	most_aim->kobj.kset = most_aim_kset;
	retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
		/* registration failed: drop the reference we just took */
		kobject_put(&most_aim->kobj);
	kobject_uevent(&most_aim->kobj, KOBJ_ADD);

/**
 * destroy_most_aim_obj - AIM release function
 * @p: pointer to AIM object
 *
 * This decrements the reference counter of the AIM object. If the
 * reference count turns zero, its release function will be called.
 */
static void destroy_most_aim_obj(struct most_aim_obj *p)
	kobject_put(&p->kobj);
/*
 * Instantiation of the MOST bus
 */
static struct bus_type most_bus = {

/*
 * Instantiation of the core driver
 */
static struct device_driver mostcore = {
/* trash_mbo - park an MBO on the channel's trash fifo for later release */
static inline void trash_mbo(struct mbo *mbo)
	unsigned long flags;
	struct most_c_obj *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add(&mbo->list, &c->trash_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

/* hdm_mbo_ready - true when the enqueue thread has work and enqueueing
 * is not halted */
static bool hdm_mbo_ready(struct most_c_obj *c)
	if (c->enqueue_halt)

	spin_lock_irq(&c->fifo_lock);
	empty = list_empty(&c->halt_fifo);
	spin_unlock_irq(&c->fifo_lock);
/* nq_hdm_mbo - queue an MBO for the HDM and wake the enqueue thread */
static void nq_hdm_mbo(struct mbo *mbo)
	unsigned long flags;
	struct most_c_obj *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	wake_up_interruptible(&c->hdm_fifo_wq);
/*
 * hdm_enqueue_thread - per-channel kthread feeding MBOs from halt_fifo
 * to the HDM's enqueue callback.  nq_mutex serializes the enqueue call
 * against concurrent halt-state changes (see struct field comment).
 */
static int hdm_enqueue_thread(void *data)
	struct most_c_obj *c = data;
	/* cache the callback so each iteration avoids the double deref */
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 kthread_should_stop());

		mutex_lock(&c->nq_mutex);
		spin_lock_irq(&c->fifo_lock);
		/* spurious wakeup, halt requested, or fifo drained: retry */
		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
			spin_unlock_irq(&c->fifo_lock);
			mutex_unlock(&c->nq_mutex);

		mbo = list_pop_mbo(&c->halt_fifo);
		spin_unlock_irq(&c->fifo_lock);

		/* for Rx, the HDM may fill the whole configured buffer */
		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
		mutex_unlock(&c->nq_mutex);

		if (unlikely(ret)) {
			pr_err("hdm enqueue failed\n");
			/* NOTE(review): the failed MBO is not visibly
			 * requeued or freed here -- possible leak; confirm
			 * against the lines missing from this excerpt. */
			c->hdm_enqueue_task = NULL;

/* run_enqueue_thread - start the per-channel hdm_enqueue_thread */
static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
	struct task_struct *task =
		kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",

		return PTR_ERR(task);

	c->hdm_enqueue_task = task;
/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: buffer object
 *
 * This puts an MBO back to the list to have it ready for up coming
 * transfers.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached AIM.
 */
static void arm_mbo(struct mbo *mbo)
	unsigned long flags;
	struct most_c_obj *c;

	BUG_ON((!mbo) || (!mbo->context));

	if (c->is_poisoned) {

	spin_lock_irqsave(&c->fifo_lock, flags);
	/* hand the buffer credit back to whichever AIM owned the MBO */
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	if (c->aim0.refs && c->aim0.ptr->tx_completion)
		c->aim0.ptr->tx_completion(c->iface, c->channel_id);

	if (c->aim1.refs && c->aim1.ptr->tx_completion)
		c->aim1.ptr->tx_completion(c->iface, c->channel_id);
/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs.
 */
static int arm_mbo_chain(struct most_c_obj *c, int dir,
			 void (*compl)(struct mbo *))
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);

		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		/*
		 * FIXME: dma_alloc_coherent() with a NULL device is invalid
		 * on most architectures; the HDM's device should be passed.
		 */
		mbo->virt_address = dma_alloc_coherent(NULL,

		if (!mbo->virt_address) {
			pr_info("WARN: No DMA coherent buffer.\n");

		mbo->complete = compl;
		mbo->num_buffers_ptr = &dummy_num_buffers;
		/* Rx buffers go straight to the HDM via the kthread fifo */
		if (dir == MOST_CH_RX) {
			atomic_inc(&c->mbo_nq_level);
/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: pointer to the MBO
 */
void most_submit_mbo(struct mbo *mbo)
	if (WARN_ONCE(!mbo || !mbo->context,
		      "bad mbo or missing channel reference\n"))

EXPORT_SYMBOL_GPL(most_submit_mbo);

/**
 * most_write_completion - write completion handler
 * @mbo: pointer to MBO
 *
 * This recycles the MBO for further usage. In case the channel has been
 * poisoned, the MBO is scheduled to be trashed.
 */
static void most_write_completion(struct mbo *mbo)
	struct most_c_obj *c;

	BUG_ON((!mbo) || (!mbo->context));

	if (mbo->status == MBO_E_INVAL)
		pr_info("WARN: Tx MBO status: invalid\n");
	/* poisoned channel or closed MBO: trash instead of recycling */
	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
/**
 * get_channel_by_iface - get pointer to channel object
 * @iface: pointer to interface instance
 * @id: channel index within the interface
 *
 * This retrieves a pointer to a channel of the given interface and channel ID.
 */
most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
	struct most_inst_obj *i;

	if (unlikely(!iface)) {
		pr_err("Bad interface\n");
	if (unlikely((id < 0) || (id >= iface->num_channels))) {
		pr_err("Channel index (%d) out of range\n", id);
		pr_err("interface is not registered\n");
	return i->channel[id];
/* channel_has_mbo - check whether the channel's fifo can currently
 * supply an MBO to the given AIM */
int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
	struct most_c_obj *c = get_channel_by_iface(iface, id);
	unsigned long flags;

	/* with two AIMs attached, respect the per-AIM buffer budget */
	if (c->aim0.refs && c->aim1.refs &&
	    ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
	     (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))

	spin_lock_irqsave(&c->fifo_lock, flags);
	empty = list_empty(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

EXPORT_SYMBOL_GPL(channel_has_mbo);
/**
 * most_get_mbo - get pointer to an MBO of pool
 * @iface: pointer to interface instance
 * @id: channel index
 * @aim: AIM requesting the buffer
 *
 * This attempts to get a free buffer out of the channel fifo.
 * Returns a pointer to MBO on success or NULL otherwise.
 */
struct mbo *most_get_mbo(struct most_interface *iface, int id,
			 struct most_aim *aim)
	struct most_c_obj *c;
	unsigned long flags;
	int *num_buffers_ptr;

	c = get_channel_by_iface(iface, id);

	/* with two AIMs attached, enforce the per-AIM buffer budget */
	if (c->aim0.refs && c->aim1.refs &&
	    ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
	     (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))

	/* charge the buffer against the requesting AIM's counter */
	if (aim == c->aim0.ptr)
		num_buffers_ptr = &c->aim0.num_buffers;
	else if (aim == c->aim1.ptr)
		num_buffers_ptr = &c->aim1.num_buffers;
		num_buffers_ptr = &dummy_num_buffers;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (list_empty(&c->fifo)) {
		spin_unlock_irqrestore(&c->fifo_lock, flags);
	mbo = list_pop_mbo(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	mbo->num_buffers_ptr = num_buffers_ptr;
	mbo->buffer_length = c->cfg.buffer_size;

EXPORT_SYMBOL_GPL(most_get_mbo);
/**
 * most_put_mbo - return buffer to pool
 * @mbo: buffer object
 *
 * Tx buffers take the arm/trash path; Rx buffers are re-queued towards
 * the HDM (lines between the branches are missing from this excerpt).
 */
void most_put_mbo(struct mbo *mbo)
	struct most_c_obj *c = mbo->context;

	if (c->cfg.direction == MOST_CH_TX) {

	atomic_inc(&c->mbo_nq_level);

EXPORT_SYMBOL_GPL(most_put_mbo);
/**
 * most_read_completion - read completion handler
 * @mbo: pointer to MBO
 *
 * This function is called by the HDM when data has been received from the
 * hardware and copied to the buffer of the MBO.
 *
 * In case the channel has been poisoned it puts the buffer in the trash queue.
 * Otherwise, it passes the buffer to an AIM for further processing.
 */
static void most_read_completion(struct mbo *mbo)
	struct most_c_obj *c = mbo->context;

	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {

	/* invalid buffers are counted back towards the HDM queue level */
	if (mbo->status == MBO_E_INVAL) {
		atomic_inc(&c->mbo_nq_level);

	/* HDM queue ran dry -- presumably flags starvation here; the
	 * following line is missing from this excerpt */
	if (atomic_sub_and_test(1, &c->mbo_nq_level))

	/* first AIM whose rx_completion returns 0 consumes the buffer */
	if (c->aim0.refs && c->aim0.ptr->rx_completion &&
	    c->aim0.ptr->rx_completion(mbo) == 0)

	if (c->aim1.refs && c->aim1.ptr->rx_completion &&
	    c->aim1.ptr->rx_completion(mbo) == 0)
1490 * most_start_channel - prepares a channel for communication
1491 * @iface: pointer to interface instance
1494 * This prepares the channel for usage. Cross-checks whether the
1495 * channel's been properly configured.
1497 * Returns 0 on success or error code otherwise.
1499 int most_start_channel(struct most_interface *iface, int id,
1500 struct most_aim *aim)
1504 struct most_c_obj *c = get_channel_by_iface(iface, id);
/* start_mutex serializes start/stop of this channel */
1509 mutex_lock(&c->start_mutex);
1510 if (c->aim0.refs + c->aim1.refs > 0)
1511 goto out; /* already started by other aim */
/* pin the HDM module for as long as the channel is running */
1513 if (!try_module_get(iface->mod)) {
1514 pr_info("failed to acquire HDM lock\n");
1515 mutex_unlock(&c->start_mutex);
1519 c->cfg.extra_len = 0;
/* hand the channel configuration down to the hardware driver */
1520 if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
1521 pr_info("channel configuration failed. Go check settings...\n");
1526 init_waitqueue_head(&c->hdm_fifo_wq);
/* pre-allocate the MBO pool; completion callback depends on direction */
1528 if (c->cfg.direction == MOST_CH_RX)
1529 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1530 most_read_completion);
1532 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1533 most_write_completion);
1534 if (unlikely(!num_buffer)) {
1535 pr_info("failed to allocate memory\n");
/* start the kthread that feeds MBOs to the HDM */
1540 ret = run_enqueue_thread(c, id);
/* split the configured buffer budget between the two AIM slots;
 * aim1 gets the remainder so the counts sum to cfg.num_buffers
 */
1545 c->aim0.num_buffers = c->cfg.num_buffers / 2;
1546 c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
1547 atomic_set(&c->mbo_ref, num_buffer);
/* take the caller's reference (increment statements are in elided lines) */
1550 if (aim == c->aim0.ptr)
1552 if (aim == c->aim1.ptr)
1554 mutex_unlock(&c->start_mutex);
/* error path: drop the module reference taken above */
1558 module_put(iface->mod);
1559 mutex_unlock(&c->start_mutex);
1562 EXPORT_SYMBOL_GPL(most_start_channel);
1565 * most_stop_channel - stops a running channel
1566 * @iface: pointer to interface instance
1569 int most_stop_channel(struct most_interface *iface, int id,
1570 struct most_aim *aim)
1572 struct most_c_obj *c;
/* validate caller-supplied interface and channel index */
1574 if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
1575 pr_err("Bad interface or index out of range\n");
1578 c = get_channel_by_iface(iface, id);
1582 mutex_lock(&c->start_mutex);
/* both AIMs still attached: only drop this caller's reference
 * (the shared-use branch target is in elided lines)
 */
1583 if (c->aim0.refs + c->aim1.refs >= 2)
/* last user: tear the channel down, starting with the enqueue thread */
1586 if (c->hdm_enqueue_task)
1587 kthread_stop(c->hdm_enqueue_task);
1588 c->hdm_enqueue_task = NULL;
1591 module_put(iface->mod);
/* poison first so completions that race with teardown go to the trash */
1593 c->is_poisoned = true;
1594 if (c->iface->poison_channel(c->iface, c->channel_id)) {
1595 pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
1596 c->iface->description);
1597 mutex_unlock(&c->start_mutex);
1600 flush_trash_fifo(c);
1601 flush_channel_fifos(c);
/* wait for all outstanding MBOs to drain back; interruptible wait is
 * a compile-time option
 */
1603 #ifdef CMPL_INTERRUPTIBLE
1604 if (wait_for_completion_interruptible(&c->cleanup)) {
1605 pr_info("Interrupted while clean up ch %d\n", c->channel_id);
1606 mutex_unlock(&c->start_mutex);
1610 wait_for_completion(&c->cleanup);
1612 c->is_poisoned = false;
/* drop the caller's reference (decrements are in elided lines) */
1615 if (aim == c->aim0.ptr)
1617 if (aim == c->aim1.ptr)
1619 mutex_unlock(&c->start_mutex);
1622 EXPORT_SYMBOL_GPL(most_stop_channel);
1625 * most_register_aim - registers an AIM (driver) with the core
1626 * @aim: instance of AIM to be registered
1628 int most_register_aim(struct most_aim *aim)
1630 struct most_aim_obj *aim_obj;
/* reject a NULL/invalid AIM (the check itself is in an elided line) */
1633 pr_err("Bad driver\n");
/* create the sysfs representation for this AIM */
1636 aim_obj = create_most_aim_obj(aim->name);
1638 pr_info("failed to alloc driver object\n");
/* cross-link driver and its sysfs object, then publish it */
1641 aim_obj->driver = aim;
1642 aim->context = aim_obj;
1643 pr_info("registered new application interfacing module %s\n",
1645 list_add_tail(&aim_obj->list, &aim_list);
1648 EXPORT_SYMBOL_GPL(most_register_aim);
1651 * most_deregister_aim - deregisters an AIM (driver) with the core
1652 * @aim: AIM to be removed
1654 int most_deregister_aim(struct most_aim *aim)
1656 struct most_aim_obj *aim_obj;
1657 struct most_c_obj *c, *tmp;
1658 struct most_inst_obj *i, *i_tmp;
1661 pr_err("Bad driver\n");
/* aim->context was set by most_register_aim(); NULL means never registered */
1665 aim_obj = aim->context;
1667 pr_info("driver not registered.\n");
/* disconnect this AIM from every channel of every registered instance */
1670 list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
1671 list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
1672 if (c->aim0.ptr == aim || c->aim1.ptr == aim)
1673 aim->disconnect_channel(
1674 c->iface, c->channel_id);
/* clear the channel's AIM slot(s) (assignments are in elided lines) */
1675 if (c->aim0.ptr == aim)
1677 if (c->aim1.ptr == aim)
/* unpublish and free the sysfs object */
1681 list_del(&aim_obj->list);
1682 destroy_most_aim_obj(aim_obj);
1683 pr_info("deregistering application interfacing module %s\n", aim->name);
1686 EXPORT_SYMBOL_GPL(most_deregister_aim);
1689 * most_register_interface - registers an interface with core
1690 * @iface: pointer to the instance of the interface description.
1692 * Allocates and initializes a new interface instance and all of its channels.
1693 * Returns a pointer to kobject or an error pointer.
1695 struct kobject *most_register_interface(struct most_interface *iface)
1699 char name[STRING_SIZE];
1700 char channel_name[STRING_SIZE];
1701 struct most_c_obj *c;
1702 struct most_inst_obj *inst;
/* a usable HDM must provide enqueue/configure/poison_channel hooks */
1704 if (!iface || !iface->enqueue || !iface->configure ||
1705 !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
1706 pr_err("Bad interface or channel overflow\n");
1707 return ERR_PTR(-EINVAL);
/* allocate a unique mdev number for the new device */
1710 id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
1712 pr_info("Failed to alloc mdev ID\n");
1715 snprintf(name, STRING_SIZE, "mdev%d", id);
1717 inst = create_most_inst_obj(name);
1719 pr_info("Failed to allocate interface instance\n");
1720 ida_simple_remove(&mdev_id, id);
1721 return ERR_PTR(-ENOMEM);
1725 INIT_LIST_HEAD(&inst->channel_list);
1726 inst->iface = iface;
1728 list_add_tail(&inst->list, &instance_list);
/* create and initialize one channel object per HDM channel */
1730 for (i = 0; i < iface->num_channels; i++) {
1731 const char *name_suffix = iface->channel_vector[i].name_suffix;
/* fall back to a generic "chN" name when the HDM gives no suffix */
1734 snprintf(channel_name, STRING_SIZE, "ch%d", i);
1736 snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
1738 /* this increments the reference count of this instance */
1739 c = create_most_c_obj(channel_name, &inst->kobj);
1742 inst->channel[i] = c;
/* start every channel with a clean, all-zero configuration */
1747 c->keep_mbo = false;
1748 c->enqueue_halt = false;
1749 c->is_poisoned = false;
1750 c->cfg.direction = 0;
1751 c->cfg.data_type = 0;
1752 c->cfg.num_buffers = 0;
1753 c->cfg.buffer_size = 0;
1754 c->cfg.subbuffer_size = 0;
1755 c->cfg.packets_per_xact = 0;
1756 spin_lock_init(&c->fifo_lock);
1757 INIT_LIST_HEAD(&c->fifo);
1758 INIT_LIST_HEAD(&c->trash_fifo);
1759 INIT_LIST_HEAD(&c->halt_fifo);
1760 init_completion(&c->cleanup);
1761 atomic_set(&c->mbo_ref, 0);
1762 mutex_init(&c->start_mutex);
1763 mutex_init(&c->nq_mutex);
1764 list_add_tail(&c->list, &inst->channel_list);
1766 pr_info("registered new MOST device mdev%d (%s)\n",
1767 inst->dev_id, iface->description);
/* error path: undo list insertion, id allocation and instance object */
1771 pr_info("Failed allocate channel(s)\n");
1772 list_del(&inst->list);
1773 ida_simple_remove(&mdev_id, id);
1774 destroy_most_inst_obj(inst);
1775 return ERR_PTR(-ENOMEM);
1777 EXPORT_SYMBOL_GPL(most_register_interface);
1780 * most_deregister_interface - deregisters an interface with core
1781 * @iface: pointer to the interface instance description.
1783 * Before removing an interface instance from the list, all running
1784 * channels are stopped and poisoned.
1786 void most_deregister_interface(struct most_interface *iface)
/* iface->priv holds the instance object set up at registration time */
1788 struct most_inst_obj *i = iface->priv;
1789 struct most_c_obj *c;
1792 pr_info("Bad Interface\n");
1795 pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
1796 iface->description);
/* notify both attached AIMs of each channel before teardown
 * (the surrounding conditions are in elided lines)
 */
1798 list_for_each_entry(c, &i->channel_list, list) {
1800 c->aim0.ptr->disconnect_channel(c->iface,
1803 c->aim1.ptr->disconnect_channel(c->iface,
/* release the mdev number and drop the instance's sysfs object */
1809 ida_simple_remove(&mdev_id, i->dev_id);
1811 destroy_most_inst_obj(i);
1813 EXPORT_SYMBOL_GPL(most_deregister_interface);
1816 * most_stop_enqueue - prevents core from enqueueing MBOs
1817 * @iface: pointer to interface
1820 * This is called by an HDM that _cannot_ attend to its duties and
1821 * is imminent to get run over by the core. The core is not going to
1822 * enqueue any further packets unless the flagging HDM calls
1823 * most_resume enqueue().
1825 void most_stop_enqueue(struct most_interface *iface, int id)
1827 struct most_c_obj *c = get_channel_by_iface(iface, id);
/* nq_mutex guards enqueue_halt against the enqueue thread */
1832 mutex_lock(&c->nq_mutex);
1833 c->enqueue_halt = true;
1834 mutex_unlock(&c->nq_mutex);
1836 EXPORT_SYMBOL_GPL(most_stop_enqueue);
1839 * most_resume_enqueue - allow core to enqueue MBOs again
1840 * @iface: pointer to interface
1843 * This clears the enqueue halt flag and enqueues all MBOs currently
1844 * sitting in the wait fifo.
1846 void most_resume_enqueue(struct most_interface *iface, int id)
1848 struct most_c_obj *c = get_channel_by_iface(iface, id);
/* clear the halt flag under the same mutex most_stop_enqueue() uses */
1853 mutex_lock(&c->nq_mutex);
1854 c->enqueue_halt = false;
1855 mutex_unlock(&c->nq_mutex);
/* kick the enqueue thread waiting on the hdm fifo waitqueue */
1857 wake_up_interruptible(&c->hdm_fifo_wq);
1859 EXPORT_SYMBOL_GPL(most_resume_enqueue);
/*
 * most_init - module entry point
 *
 * Registers, in order: the most bus, the "most" class, the core
 * driver, the "mostcore" device, and the "aims"/"devices" ksets under
 * that device.  Failures unwind in reverse order through the labels
 * below (some goto statements and labels are in elided lines).
 */
1861 static int __init most_init(void)
1865 pr_info("init()\n");
1866 INIT_LIST_HEAD(&instance_list);
1867 INIT_LIST_HEAD(&aim_list);
1870 err = bus_register(&most_bus);
1872 pr_info("Cannot register most bus\n");
1876 most_class = class_create(THIS_MODULE, "most");
1877 if (IS_ERR(most_class)) {
1878 pr_info("No udev support.\n");
1879 err = PTR_ERR(most_class);
1883 err = driver_register(&mostcore);
1885 pr_info("Cannot register core driver\n");
/* devt 0: the core device has no char-device node of its own */
1889 core_dev = device_create(most_class, NULL, 0, NULL, "mostcore");
1890 if (IS_ERR(core_dev)) {
1891 err = PTR_ERR(core_dev);
1895 most_aim_kset = kset_create_and_add("aims", NULL, &core_dev->kobj);
1896 if (!most_aim_kset) {
1898 goto exit_class_container;
1901 most_inst_kset = kset_create_and_add("devices", NULL, &core_dev->kobj);
1902 if (!most_inst_kset) {
1904 goto exit_driver_kset;
/* error unwind: release resources in reverse order of acquisition */
1910 kset_unregister(most_aim_kset);
1911 exit_class_container:
1912 device_destroy(most_class, 0);
1914 driver_unregister(&mostcore);
1916 class_destroy(most_class);
1918 bus_unregister(&most_bus);
/*
 * most_exit - module exit point
 *
 * Destroys all remaining AIM and instance objects, then unregisters
 * the sysfs/driver-model pieces in reverse order of most_init() and
 * releases the mdev id allocator.
 */
1922 static void __exit most_exit(void)
1924 struct most_inst_obj *i, *i_tmp;
1925 struct most_aim_obj *d, *d_tmp;
1927 pr_info("exit core module\n");
/* _safe iteration: destroy_* removes entries while we walk the lists */
1928 list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
1929 destroy_most_aim_obj(d);
1932 list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
1934 destroy_most_inst_obj(i);
1936 kset_unregister(most_inst_kset);
1937 kset_unregister(most_aim_kset);
1938 device_destroy(most_class, 0);
1939 driver_unregister(&mostcore);
1940 class_destroy(most_class);
1941 bus_unregister(&most_bus);
1942 ida_destroy(&mdev_id);
/* standard kernel module registration and metadata */
1945 module_init(most_init);
1946 module_exit(most_exit);
1947 MODULE_LICENSE("GPL");
1948 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1949 MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");