2 * Loopback bridge driver for the Greybus loopback module.
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
7 * Released under the GPLv2 only.
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/kthread.h>
14 #include <linux/delay.h>
15 #include <linux/random.h>
16 #include <linux/sizes.h>
17 #include <linux/cdev.h>
19 #include <linux/kfifo.h>
20 #include <linux/debugfs.h>
21 #include <linux/list_sort.h>
23 #include <asm/div64.h>
/*
 * NOTE(review): this rendering of the file is sampled -- interior lines of
 * every definition below are missing (the leading numbers are original file
 * line numbers).  Comments describe only what the visible lines establish.
 */
/* Nanoseconds in one day; used for latency rollover math in
 * __gb_loopback_calc_latency() further down. */
27 #define NSEC_PER_DAY 86400000000000ULL
/* min/max/avg statistics bucket (fields not visible in this rendering;
 * the avg attribute macro below reads ->sum and ->count -- TODO confirm). */
29 struct gb_loopback_stats {
/* Driver-wide aggregate state shared by every loopback connection. */
36 struct gb_loopback_device {
/* Head of the list of per-connection gb_loopback instances (->entry). */
43 struct list_head list;
/* Aggregate statistics, mirrored per-connection in struct gb_loopback. */
59 struct gb_loopback_stats latency;
60 struct gb_loopback_stats throughput;
61 struct gb_loopback_stats requests_per_second;
/* Single global device instance; all "dev"-suffixed sysfs files read it. */
64 static struct gb_loopback_device gb_dev;
/* Per-connection state (struct gb_loopback; its opening line is not
 * visible here). */
67 struct gb_connection *connection;
/* FIFO of per-iteration latency samples (u32 usec) and FIFO of raw
 * start/end timeval pairs pushed by gb_loopback_push_latency_ts(). */
70 struct kfifo kfifo_lat;
71 struct kfifo kfifo_ts;
/* Worker thread running gb_loopback_fn(), and linkage into gb_dev.list. */
73 struct task_struct *task;
74 struct list_head entry;
76 /* Per connection stats */
77 struct gb_loopback_stats latency;
78 struct gb_loopback_stats throughput;
79 struct gb_loopback_stats requests_per_second;
/* Default number of samples each kfifo can hold. */
87 #define GB_LOOPBACK_FIFO_DEFAULT 8192
89 static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
/* 0444: depth is read-only at runtime, settable only at module load. */
90 module_param(kfifo_depth, uint, 0444);
92 /* Maximum size of any one send data buffer we support */
93 #define MAX_PACKET_SIZE (PAGE_SIZE * 2)
/* Upper clamp for the user-settable inter-message wait, in milliseconds. */
95 #define GB_LOOPBACK_MS_WAIT_MAX 1000
97 /* interface sysfs attributes */
/*
 * gb_loopback_ro_attr(field, pfx, conn): generate read-only sysfs attribute
 * <field>_<pfx>.  When conn is true the _show body reads the per-connection
 * value (gb->field via connection->private); otherwise it reads the
 * aggregate gb_dev.field.  (The conn-selection preprocessor lines are not
 * visible in this rendering.)
 */
98 #define gb_loopback_ro_attr(field, pfx, conn) \
99 static ssize_t field##_##pfx##_show(struct device *dev, \
100 struct device_attribute *attr, \
103 struct gb_connection *connection; \
104 struct gb_loopback *gb; \
106 connection = to_gb_connection(dev); \
107 gb = connection->private; \
108 return sprintf(buf, "%u\n", gb->field); \
110 return sprintf(buf, "%u\n", gb_dev.field); \
113 static DEVICE_ATTR_RO(field##_##pfx)
/*
 * gb_loopback_ro_stats_attr(name, field, type, pfx, conn): read-only
 * attribute <name>_<field>_<pfx> exposing one member of a
 * gb_loopback_stats bucket, printed with the given printf type letter.
 */
115 #define gb_loopback_ro_stats_attr(name, field, type, pfx, conn) \
116 static ssize_t name##_##field##_##pfx##_show(struct device *dev, \
117 struct device_attribute *attr, \
120 struct gb_connection *connection; \
121 struct gb_loopback *gb; \
123 connection = to_gb_connection(dev); \
124 gb = connection->private; \
125 return sprintf(buf, "%"#type"\n", gb->name.field); \
127 return sprintf(buf, "%"#type"\n", gb_dev.name.field); \
130 static DEVICE_ATTR_RO(name##_##field##_##pfx)
/*
 * gb_loopback_ro_avg_attr(name, pfx, conn): read-only attribute
 * <name>_avg_<pfx>.  Computes sum/count rounded to nearest via do_div()
 * and prints it as a fixed-point "integer.micro" value; count is clamped
 * to at least 1 to avoid dividing by zero.
 */
132 #define gb_loopback_ro_avg_attr(name, pfx, conn) \
133 static ssize_t name##_avg_##pfx##_show(struct device *dev, \
134 struct device_attribute *attr, \
137 struct gb_loopback_stats *stats; \
138 struct gb_connection *connection; \
139 struct gb_loopback *gb; \
143 connection = to_gb_connection(dev); \
144 gb = connection->private; \
147 stats = &gb_dev.name; \
149 count = stats->count ? stats->count : 1; \
150 avg = stats->sum + count / 2; /* round closest */ \
151 rem = do_div(avg, count); \
152 return sprintf(buf, "%llu.%06u\n", avg, 1000000 * rem / count); \
154 static DEVICE_ATTR_RO(name##_avg_##pfx)
/* Convenience bundle: min, max and avg attributes for one stats bucket. */
156 #define gb_loopback_stats_attrs(field, pfx, conn) \
157 gb_loopback_ro_stats_attr(field, min, u, pfx, conn); \
158 gb_loopback_ro_stats_attr(field, max, u, pfx, conn); \
159 gb_loopback_ro_avg_attr(field, pfx, conn)
/*
 * gb_loopback_attr(field, type): read-write per-connection attribute;
 * _store sscanf()s into gb->field under gb_dev.mutex and then revalidates
 * all settings via gb_loopback_check_attr().
 */
161 #define gb_loopback_attr(field, type) \
162 static ssize_t field##_show(struct device *dev, \
163 struct device_attribute *attr, \
166 struct gb_connection *connection = to_gb_connection(dev); \
167 struct gb_loopback *gb = connection->private; \
168 return sprintf(buf, "%"#type"\n", gb->field); \
170 static ssize_t field##_store(struct device *dev, \
171 struct device_attribute *attr, \
176 struct gb_connection *connection = to_gb_connection(dev); \
177 mutex_lock(&gb_dev.mutex); \
178 ret = sscanf(buf, "%"#type, &gb->field); \
182 gb_loopback_check_attr(connection); \
183 mutex_unlock(&gb_dev.mutex); \
186 static DEVICE_ATTR_RW(field)
/* Device-wide read-only attribute over gb_dev.field. */
188 #define gb_dev_loopback_ro_attr(field, conn) \
189 static ssize_t field##_show(struct device *dev, \
190 struct device_attribute *attr, \
193 return sprintf(buf, "%u\n", gb_dev.field); \
195 static DEVICE_ATTR_RO(field)
/*
 * Device-wide read-write attribute: _store parses into gb_dev.field under
 * gb_dev.mutex, then gb_loopback_check_attr() clamps/validates and kicks
 * the worker threads.
 */
197 #define gb_dev_loopback_rw_attr(field, type) \
198 static ssize_t field##_show(struct device *dev, \
199 struct device_attribute *attr, \
202 return sprintf(buf, "%"#type"\n", gb_dev.field); \
204 static ssize_t field##_store(struct device *dev, \
205 struct device_attribute *attr, \
210 struct gb_connection *connection = to_gb_connection(dev); \
211 mutex_lock(&gb_dev.mutex); \
212 ret = sscanf(buf, "%"#type, &gb_dev.field); \
216 gb_loopback_check_attr(&gb_dev, connection); \
217 mutex_unlock(&gb_dev.mutex); \
220 static DEVICE_ATTR_RW(field)
/* Forward declaration: check_attr resets stats after attribute changes. */
222 static void gb_loopback_reset_stats(struct gb_loopback_device *gb_dev);
/*
 * Revalidate user-supplied settings after a sysfs store (caller holds
 * gb_dev.mutex): clamp ms_wait and size to their maxima, restart iteration
 * counting, reset every connection's FIFOs, and for an active test type
 * reset aggregate stats and wake the worker threads.
 */
223 static void gb_loopback_check_attr(struct gb_loopback_device *gb_dev,
224 struct gb_connection *connection)
226 struct gb_loopback *gb;
/* Clamp wait time and payload size to supported ranges. */
228 if (gb_dev->ms_wait > GB_LOOPBACK_MS_WAIT_MAX)
229 gb_dev->ms_wait = GB_LOOPBACK_MS_WAIT_MAX;
230 if (gb_dev->size > gb_dev->size_max)
231 gb_dev->size = gb_dev->size_max;
232 gb_dev->iteration_count = 0;
/* Restart per-connection iteration counts and drain their FIFOs. */
235 list_for_each_entry(gb, &gb_dev->list, entry) {
236 mutex_lock(&gb->mutex);
237 gb->iteration_count = 0;
/* Warn when the requested iteration count exceeds FIFO capacity:
 * some samples will be dropped rather than logged. */
239 if (kfifo_depth < gb_dev->iteration_max) {
240 dev_warn(&connection->dev,
241 "cannot log bytes %u kfifo_depth %u\n",
242 gb_dev->iteration_max, kfifo_depth);
244 kfifo_reset_out(&gb->kfifo_lat);
245 kfifo_reset_out(&gb->kfifo_ts);
246 mutex_unlock(&gb->mutex);
/* For an active test type: clear aggregate state and start the run. */
249 switch (gb_dev->type) {
250 case GB_LOOPBACK_TYPE_PING:
251 case GB_LOOPBACK_TYPE_TRANSFER:
252 case GB_LOOPBACK_TYPE_SINK:
253 kfifo_reset_out(&gb_dev->kfifo);
254 gb_loopback_reset_stats(gb_dev);
255 wake_up(&gb_dev->wq);
/*
 * Instantiate the sysfs attributes: each stats bucket gets an aggregate
 * ("dev") and a per-connection ("con") variant via the macros above.
 */
263 /* Time to send and receive one message */
264 gb_loopback_stats_attrs(latency, dev, false);
265 gb_loopback_stats_attrs(latency, con, true);
266 /* Number of requests sent per second on this cport */
267 gb_loopback_stats_attrs(requests_per_second, dev, false);
268 gb_loopback_stats_attrs(requests_per_second, con, true);
269 /* Quantity of data sent and received on this cport */
270 gb_loopback_stats_attrs(throughput, dev, false);
271 gb_loopback_stats_attrs(throughput, con, true);
272 /* Number of errors encountered during loop */
273 gb_loopback_ro_attr(error, dev, false);
274 gb_loopback_ro_attr(error, con, true);
277 * Type of loopback message to send based on protocol type definitions
278 * 0 => Don't send message
279 * 2 => Send ping message continuously (message without payload)
280 * 3 => Send transfer message continuously (message with payload,
281 * payload returned in response)
282 * 4 => Send a sink message (message with payload, no payload in response)
284 gb_dev_loopback_rw_attr(type, d);
285 /* Size of transfer message payload: 0-4096 bytes */
286 gb_dev_loopback_rw_attr(size, u);
287 /* Time to wait between two messages: 0-1000 ms */
288 gb_dev_loopback_rw_attr(ms_wait, d);
289 /* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
290 gb_dev_loopback_rw_attr(iteration_max, u);
291 /* The current index of the for (i = 0; i < iteration_max; i++) loop */
292 gb_dev_loopback_ro_attr(iteration_count, false);
293 /* A bit-mask of destination connections to include in the test run */
294 gb_dev_loopback_rw_attr(mask, u);
/* Aggregate attribute group, registered once on the endo device kobject
 * by gb_loopback_connection_init(). */
296 static struct attribute *loopback_dev_attrs[] = {
297 &dev_attr_latency_min_dev.attr,
298 &dev_attr_latency_max_dev.attr,
299 &dev_attr_latency_avg_dev.attr,
300 &dev_attr_requests_per_second_min_dev.attr,
301 &dev_attr_requests_per_second_max_dev.attr,
302 &dev_attr_requests_per_second_avg_dev.attr,
303 &dev_attr_throughput_min_dev.attr,
304 &dev_attr_throughput_max_dev.attr,
305 &dev_attr_throughput_avg_dev.attr,
308 &dev_attr_ms_wait.attr,
309 &dev_attr_iteration_count.attr,
310 &dev_attr_iteration_max.attr,
312 &dev_attr_error_dev.attr,
315 ATTRIBUTE_GROUPS(loopback_dev);
/* Per-connection attribute group, registered on each connection device. */
317 static struct attribute *loopback_con_attrs[] = {
318 &dev_attr_latency_min_con.attr,
319 &dev_attr_latency_max_con.attr,
320 &dev_attr_latency_avg_con.attr,
321 &dev_attr_requests_per_second_min_con.attr,
322 &dev_attr_requests_per_second_max_con.attr,
323 &dev_attr_requests_per_second_avg_con.attr,
324 &dev_attr_throughput_min_con.attr,
325 &dev_attr_throughput_max_con.attr,
326 &dev_attr_throughput_avg_con.attr,
327 &dev_attr_error_con.attr,
330 ATTRIBUTE_GROUPS(loopback_con);
/* Convert an elapsed time in nanoseconds to microseconds (do_div because
 * 64-bit division is not native on all 32-bit kernel targets). */
332 static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
336 do_div(elapsed_nsecs, NSEC_PER_USEC);
/* Elapsed nanoseconds between t1 and t2; the visible branch handles
 * day rollover (t2 wrapped past midnight before t1). */
341 static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
346 return NSEC_PER_DAY - t2 + t1;
/* Latency between two timevals, in nanoseconds. */
349 static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
353 t1 = timeval_to_ns(ts);
354 t2 = timeval_to_ns(te);
356 return __gb_loopback_calc_latency(t1, t2);
/* Log the raw start/end timestamps of one operation as a pair of
 * consecutive entries in the per-connection timestamp FIFO. */
359 static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
360 struct timeval *ts, struct timeval *te)
362 kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
363 kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
/* A connection participates in the run when the mask is 0 (all) or its
 * lbid bit is set in the user-supplied mask. */
366 static int gb_loopback_active(struct gb_loopback *gb)
368 return (gb_dev.mask == 0 || (gb_dev.mask & gb->lbid));
/*
 * Send one synchronous Greybus operation of the given type and time it.
 * Timestamps bracket create+send+response handling; on return the raw
 * timestamps are pushed to the FIFO and gb->elapsed_nsecs is updated.
 * Returns 0 on success or a negative errno (error paths partly not
 * visible in this rendering).
 */
371 static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
372 void *request, int request_size,
373 void *response, int response_size)
375 struct gb_operation *operation;
376 struct timeval ts, te;
/* Start timestamp taken before operation creation, so creation cost is
 * included in the measured latency. */
379 do_gettimeofday(&ts);
380 operation = gb_operation_create(gb->connection, type, request_size,
381 response_size, GFP_KERNEL);
388 memcpy(operation->request->payload, request, request_size);
390 ret = gb_operation_request_send_sync(operation);
392 dev_err(&gb->connection->dev,
393 "synchronous operation failed: %d\n", ret);
/* Copy the response out only when its size matches what we expected. */
395 if (response_size == operation->response->payload_size) {
396 memcpy(response, operation->response->payload,
399 dev_err(&gb->connection->dev,
400 "response size %zu expected %d\n",
401 operation->response->payload_size,
405 gb_operation_destroy(operation);
408 do_gettimeofday(&te);
410 /* Calculate the total time the message took */
411 gb_loopback_push_latency_ts(gb, &ts, &te);
412 gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
/* Sink test: send len payload bytes; the remote end discards them
 * (no payload expected back -- the response args are not visible here). */
417 static int gb_loopback_sink(struct gb_loopback *gb, u32 len)
419 struct gb_loopback_transfer_request *request;
/* Header plus len payload bytes in one allocation. */
422 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
426 request->len = cpu_to_le32(len);
427 retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
428 request, len + sizeof(*request),
/* Transfer test: send len payload bytes and verify the remote echoes
 * them back unmodified. */
434 static int gb_loopback_transfer(struct gb_loopback *gb, u32 len)
436 struct gb_loopback_transfer_request *request;
437 struct gb_loopback_transfer_response *response;
440 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
443 response = kmalloc(len + sizeof(*response), GFP_KERNEL);
/* Known fill pattern so a mismatch in the echo is detectable. */
449 memset(request->data, 0x5A, len);
451 request->len = cpu_to_le32(len);
452 retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
453 request, len + sizeof(*request),
454 response, len + sizeof(*response));
458 if (memcmp(request->data, response->data, len)) {
459 dev_err(&gb->connection->dev, "Loopback Data doesn't match\n");
/* Ping test: header-only operation, no payload either direction. */
470 static int gb_loopback_ping(struct gb_loopback *gb)
472 return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
/*
 * Handle an incoming (module-initiated) loopback request.  Ping/sink need
 * no response payload; transfer validates the request size, bounds the
 * payload length against size_max, and echoes the data back.  Returns 0
 * or a negative errno.
 */
476 static int gb_loopback_request_recv(u8 type, struct gb_operation *operation)
478 struct gb_connection *connection = operation->connection;
479 struct gb_loopback_transfer_request *request;
480 struct gb_loopback_transfer_response *response;
483 /* By convention, the AP initiates the version operation */
485 case GB_REQUEST_TYPE_PROTOCOL_VERSION:
486 dev_err(&connection->dev,
487 "module-initiated version operation\n");
489 case GB_LOOPBACK_TYPE_PING:
490 case GB_LOOPBACK_TYPE_SINK:
492 case GB_LOOPBACK_TYPE_TRANSFER:
/* Reject requests too small to even contain the transfer header. */
493 if (operation->request->payload_size < sizeof(*request)) {
494 dev_err(&connection->dev,
495 "transfer request too small (%zu < %zu)\n",
496 operation->request->payload_size,
498 return -EINVAL; /* -EMSGSIZE */
/* Bound the untrusted length field against the negotiated maximum. */
500 request = operation->request->payload;
501 len = le32_to_cpu(request->len);
502 if (len > gb_dev.size_max) {
503 dev_err(&connection->dev,
504 "transfer request too large (%zu > %zu)\n",
505 len, gb_dev.size_max);
/* Allocate a response big enough to echo len bytes back. */
510 if (!gb_operation_response_alloc(operation, len,
512 dev_err(&connection->dev,
513 "error allocating response\n");
516 response = operation->response->payload;
517 response->len = cpu_to_le32(len);
518 memcpy(response->data, request->data, len);
522 dev_err(&connection->dev,
523 "unsupported request: %hhu\n", type);
/*
 * Zero all statistics: each connection's buckets (under its own mutex),
 * then the aggregate buckets and start/end timestamps.  Caller holds
 * gb_dev->mutex (called from gb_loopback_check_attr()).
 */
528 static void gb_loopback_reset_stats(struct gb_loopback_device *gb_dev)
530 struct gb_loopback_stats reset = {
533 struct gb_loopback *gb;
535 /* Reset per-connection stats */
536 list_for_each_entry(gb, &gb_dev->list, entry) {
537 mutex_lock(&gb->mutex);
538 memcpy(&gb->latency, &reset,
539 sizeof(struct gb_loopback_stats));
540 memcpy(&gb->throughput, &reset,
541 sizeof(struct gb_loopback_stats));
542 memcpy(&gb->requests_per_second, &reset,
543 sizeof(struct gb_loopback_stats));
544 mutex_unlock(&gb->mutex);
547 /* Reset aggregate stats */
548 memset(&gb_dev->start, 0, sizeof(struct timeval));
549 memset(&gb_dev->end, 0, sizeof(struct timeval));
550 memcpy(&gb_dev->latency, &reset, sizeof(struct gb_loopback_stats));
551 memcpy(&gb_dev->throughput, &reset, sizeof(struct gb_loopback_stats));
552 memcpy(&gb_dev->requests_per_second, &reset,
553 sizeof(struct gb_loopback_stats));
/* Fold one sample into a stats bucket: track min and max (sum/count
 * updates are on lines not visible in this rendering). */
556 static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
558 if (stats->min > val)
560 if (stats->max < val)
/* Derive requests/second from one operation's latency (usec) and fold it
 * into both the aggregate and per-connection buckets. */
566 static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
568 u32 req = USEC_PER_SEC;
570 do_div(req, latency);
571 gb_loopback_update_stats(&gb_dev.requests_per_second, req);
572 gb_loopback_update_stats(&gb->requests_per_second, req);
/*
 * Derive bytes/second from one operation's latency.  aggregate_size starts
 * as two operation headers (request + response) and grows by the payload
 * sizes relevant to the current test type.
 */
575 static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
578 u32 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;
580 switch (gb_dev.type) {
581 case GB_LOOPBACK_TYPE_PING:
583 case GB_LOOPBACK_TYPE_SINK:
584 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
587 case GB_LOOPBACK_TYPE_TRANSFER:
/* Transfer moves payload both directions, so both structs count. */
588 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
589 sizeof(struct gb_loopback_transfer_response) +
596 /* Calculate bytes per second */
597 throughput = USEC_PER_SEC;
598 do_div(throughput, latency);
599 throughput *= aggregate_size;
600 gb_loopback_update_stats(&gb_dev.throughput, throughput);
601 gb_loopback_update_stats(&gb->throughput, throughput);
/*
 * After a bounded run completes, walk every active connection's timestamp
 * FIFO per iteration, latch the earliest start and latest end timestamp
 * (with day-rollover handling), and log the resulting aggregate latency
 * into gb_dev.kfifo for the device-level debugfs file.
 */
604 static int gb_loopback_calculate_aggregate_stats(void)
606 struct gb_loopback *gb;
617 for (i = 0; i < gb_dev.iteration_max; i++) {
621 list_for_each_entry(gb, &gb_dev.list, entry) {
622 if (!gb_loopback_active(gb))
/* Timestamps were pushed in (start, end) pairs; pop both. */
624 if (kfifo_out(&gb->kfifo_ts, &ts, sizeof(ts)) < sizeof(ts))
626 if (kfifo_out(&gb->kfifo_ts, &te, sizeof(te)) < sizeof(te))
628 t1 = timeval_to_ns(&ts);
629 t2 = timeval_to_ns(&te);
631 /* minimum timestamp is always what we want */
632 if (latched == 0 || t1 < ts_min)
635 /* maximum timestamp needs to handle rollover */
637 if (latched == 0 || t2 > te_max)
640 if (latched == 0 || rollover == 0)
642 if (rollover == 1 && t2 > te_max)
648 /* Calculate the aggregate timestamp */
649 elapsed_nsecs = __gb_loopback_calc_latency(ts_min, te_max);
650 lat = gb_loopback_nsec_to_usec_latency(elapsed_nsecs);
651 kfifo_in(&gb_dev.kfifo, (unsigned char *)&lat, sizeof(lat));
/* Error path (label lines not visible): drop partial aggregate data. */
655 kfifo_reset_out(&gb_dev.kfifo);
/*
 * Fold one completed operation's measurements into all stats buckets:
 * latency, throughput and requests/second, plus the raw per-thread
 * latency FIFO.  Called from the worker with gb->mutex held.
 */
659 static void gb_loopback_calculate_stats(struct gb_loopback *gb)
663 /* Express latency in terms of microseconds */
664 lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);
666 /* Log latency statistic */
667 gb_loopback_update_stats(&gb_dev.latency, lat);
668 gb_loopback_update_stats(&gb->latency, lat);
670 /* Raw latency log on a per thread basis */
671 kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));
673 /* Log throughput and requests using latency as benchmark */
674 gb_loopback_throughput_update(gb, lat);
675 gb_loopback_requests_update(gb, lat);
/*
 * Per-connection worker thread.  Sleeps until a test type is set (or the
 * thread is stopped), then loops: under gb_dev.mutex it checks whether
 * this connection is active, advances the device-wide iteration_count to
 * the lowest count achieved by all active threads (notifying sysfs and,
 * at iteration_max, computing aggregate stats); then performs one
 * ping/transfer/sink operation and records stats under gb->mutex.
 */
678 static int gb_loopback_fn(void *data)
685 struct gb_loopback *gb = data;
686 struct gb_loopback *gb_list;
690 wait_event_interruptible(gb_dev.wq, gb_dev.type ||
691 kthread_should_stop());
692 if (kthread_should_stop())
695 mutex_lock(&gb_dev.mutex);
696 if (!gb_loopback_active(gb)) {
698 goto unlock_continue;
700 if (gb_dev.iteration_max) {
701 /* Determine overall lowest count */
702 low_count = gb->iteration_count;
703 list_for_each_entry(gb_list, &gb_dev.list, entry) {
704 if (!gb_loopback_active(gb_list))
706 if (gb_list->iteration_count < low_count)
707 low_count = gb_list->iteration_count;
709 /* All threads achieved at least low_count iterations */
710 if (gb_dev.iteration_count < low_count) {
711 gb_dev.iteration_count = low_count;
712 sysfs_notify(&gb->connection->dev.kobj, NULL,
715 /* Optionally terminate */
716 if (gb_dev.iteration_count == gb_dev.iteration_max) {
717 gb_loopback_calculate_aggregate_stats();
719 goto unlock_continue;
/* Snapshot settings under the device mutex before dropping it. */
723 ms_wait = gb_dev.ms_wait;
725 mutex_unlock(&gb_dev.mutex);
727 mutex_lock(&gb->mutex);
728 if (gb->iteration_count >= gb_dev.iteration_max) {
729 /* If this thread finished before siblings then sleep */
731 mutex_unlock(&gb->mutex);
734 /* Else operations to perform */
735 if (type == GB_LOOPBACK_TYPE_PING)
736 error = gb_loopback_ping(gb);
737 else if (type == GB_LOOPBACK_TYPE_TRANSFER)
738 error = gb_loopback_transfer(gb, size);
739 else if (type == GB_LOOPBACK_TYPE_SINK)
740 error = gb_loopback_sink(gb, size);
741 mutex_unlock(&gb->mutex);
/* Re-take both locks (device first, then connection) to update stats
 * consistently with the reset paths. */
743 mutex_lock(&gb_dev.mutex);
744 mutex_lock(&gb->mutex);
750 gb_loopback_calculate_stats(gb);
751 gb->iteration_count++;
753 mutex_unlock(&gb->mutex);
755 mutex_unlock(&gb_dev.mutex);
/*
 * Shared debugfs seq_file body: drain a latency kfifo and print each u32
 * sample.  Reading is destructive (kfifo_out consumes the entries).
 */
763 static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
770 if (kfifo_len(kfifo) == 0) {
776 retval = kfifo_out(kfifo, &latency, sizeof(latency));
778 seq_printf(s, "%u", latency);
/* Per-connection raw latency file: backed by gb->kfifo_lat. */
786 static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
788 struct gb_loopback *gb = s->private;
790 return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
794 static int gb_loopback_latency_open(struct inode *inode, struct file *file)
796 return single_open(file, gb_loopback_dbgfs_latency_show,
800 static const struct file_operations gb_loopback_debugfs_latency_ops = {
801 .open = gb_loopback_latency_open,
804 .release = single_release,
/* Device-level aggregate latency file: backed by gb_dev->kfifo. */
807 static int gb_loopback_dbgfs_dev_latency_show(struct seq_file *s, void *unused)
809 struct gb_loopback_device *gb_dev = s->private;
811 return gb_loopback_dbgfs_latency_show_common(s, &gb_dev->kfifo,
815 static int gb_loopback_dev_latency_open(struct inode *inode, struct file *file)
817 return single_open(file, gb_loopback_dbgfs_dev_latency_show,
821 static const struct file_operations gb_loopback_debugfs_dev_latency_ops = {
822 .open = gb_loopback_dev_latency_open,
825 .release = single_release,
/*
 * list_sort() comparator: orders connections by (module_id, interface_id,
 * bundle id, cport id) so loopback ids are assigned in stable bus order.
 */
828 static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
829 struct list_head *lhb)
831 struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
832 struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
833 struct gb_connection *ca = a->connection;
834 struct gb_connection *cb = b->connection;
/* Primary key: module id. */
836 if (ca->bundle->intf->module->module_id <
837 cb->bundle->intf->module->module_id)
839 if (cb->bundle->intf->module->module_id <
840 ca->bundle->intf->module->module_id)
/* Then interface id, bundle id, and finally cport id. */
842 if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
844 if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
846 if (ca->bundle->id < cb->bundle->id)
848 if (cb->bundle->id < ca->bundle->id)
850 if (ca->intf_cport_id < cb->intf_cport_id)
852 else if (cb->intf_cport_id < ca->intf_cport_id)
/*
 * Add the new connection to the global list, re-sort by bus position, and
 * reassign each entry a one-hot lbid (1 << position) used by the sysfs
 * "mask" attribute.  Caller holds gb_dev.mutex.
 */
858 static void gb_loopback_insert_id(struct gb_loopback *gb)
860 struct gb_loopback *gb_list;
863 /* perform an insertion sort */
864 list_add_tail(&gb->entry, &gb_dev.list);
865 list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
866 list_for_each_entry(gb_list, &gb_dev.list, entry) {
867 gb_list->lbid = 1 << new_lbid;
/* Buffer size for debugfs file names built below. */
872 #define DEBUGFS_NAMELEN 32
/*
 * Per-connection protocol init: allocate the gb_loopback context, create
 * the device-wide debugfs/sysfs entries (on first connection -- the guard
 * is not visible here), compute size_max from the operation payload limit,
 * create per-connection debugfs/sysfs entries, allocate the latency and
 * timestamp FIFOs, start the worker thread, and insert into the sorted
 * connection list.  Error paths unwind in reverse (goto cleanup labels
 * are on lines not visible in this rendering).
 */
874 static int gb_loopback_connection_init(struct gb_connection *connection)
876 struct gb_loopback *gb;
878 char name[DEBUGFS_NAMELEN];
879 struct kobject *kobj = &connection->hd->endo->dev.kobj;
881 gb = kzalloc(sizeof(*gb), GFP_KERNEL);
884 gb_loopback_reset_stats(&gb_dev);
886 /* If this is the first connection - create a module endo0:x entries */
887 mutex_lock(&gb_dev.mutex);
889 snprintf(name, sizeof(name), "raw_latency_endo0:%d",
890 connection->bundle->intf->module->module_id);
891 gb_dev.file = debugfs_create_file(name, S_IFREG | S_IRUGO,
892 gb_dev.root, &gb_dev,
893 &gb_loopback_debugfs_dev_latency_ops);
894 retval = sysfs_create_groups(kobj, loopback_dev_groups);
898 /* Calculate maximum payload */
899 gb_dev.size_max = gb_operation_get_payload_size_max(connection);
900 if (gb_dev.size_max <=
901 sizeof(struct gb_loopback_transfer_request)) {
/* Reserve room for the transfer header within the payload limit. */
905 gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
908 /* Create per-connection sysfs and debugfs data-points */
909 snprintf(name, sizeof(name), "raw_latency_%s",
910 dev_name(&connection->dev));
911 gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
912 &gb_loopback_debugfs_latency_ops);
913 gb->connection = connection;
914 connection->private = gb;
915 retval = sysfs_create_groups(&connection->dev.kobj,
916 loopback_con_groups);
/* Latency FIFO holds u32 samples; timestamp FIFO holds start/end
 * timeval pairs, hence the *2. */
921 if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
926 if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
932 /* Fork worker thread */
933 mutex_init(&gb->mutex);
934 gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
935 if (IS_ERR(gb->task)) {
936 retval = PTR_ERR(gb->task);
940 gb_loopback_insert_id(gb);
942 mutex_unlock(&gb_dev.mutex);
/* Error unwind: free resources in reverse order of acquisition. */
946 kfifo_free(&gb->kfifo_ts);
948 kfifo_free(&gb->kfifo_lat);
950 sysfs_remove_groups(&connection->dev.kobj, loopback_con_groups);
953 sysfs_remove_groups(kobj, loopback_dev_groups);
954 debugfs_remove(gb_dev.file);
956 debugfs_remove(gb->file);
957 connection->private = NULL;
959 mutex_unlock(&gb_dev.mutex);
/*
 * Per-connection teardown: stop the worker thread first (outside the
 * mutex, since the thread takes gb_dev.mutex itself), then release FIFOs,
 * sysfs groups, debugfs files and the list entry in reverse order of
 * gb_loopback_connection_init().
 */
965 static void gb_loopback_connection_exit(struct gb_connection *connection)
967 struct gb_loopback *gb = connection->private;
968 struct kobject *kobj = &connection->hd->endo->dev.kobj;
/* task may hold an ERR_PTR if kthread_run() failed during init. */
970 if (!IS_ERR_OR_NULL(gb->task))
971 kthread_stop(gb->task);
973 mutex_lock(&gb_dev.mutex);
975 connection->private = NULL;
976 kfifo_free(&gb->kfifo_lat);
977 kfifo_free(&gb->kfifo_ts);
/* Device-wide entries removed here too -- presumably guarded by a
 * last-connection check on lines not visible in this rendering. */
980 sysfs_remove_groups(kobj, loopback_dev_groups);
981 debugfs_remove(gb_dev.file);
983 sysfs_remove_groups(&connection->dev.kobj, loopback_con_groups);
984 debugfs_remove(gb->file);
985 list_del(&gb->entry);
986 mutex_unlock(&gb_dev.mutex);
/* Greybus protocol registration: binds loopback connections to the
 * init/exit/request handlers above. */
990 static struct gb_protocol loopback_protocol = {
992 .id = GREYBUS_PROTOCOL_LOOPBACK,
993 .major = GB_LOOPBACK_VERSION_MAJOR,
994 .minor = GB_LOOPBACK_VERSION_MINOR,
995 .connection_init = gb_loopback_connection_init,
996 .connection_exit = gb_loopback_connection_exit,
997 .request_recv = gb_loopback_request_recv,
/*
 * Module init: set up global state (waitqueue, list, mutex, debugfs dir,
 * aggregate latency FIFO), then register the protocol; on failure the
 * debugfs dir is removed recursively.
 */
1000 static int loopback_init(void)
1004 init_waitqueue_head(&gb_dev.wq);
1005 INIT_LIST_HEAD(&gb_dev.list);
1006 mutex_init(&gb_dev.mutex);
1007 gb_dev.root = debugfs_create_dir("gb_loopback", NULL);
1009 if (kfifo_alloc(&gb_dev.kfifo, kfifo_depth * sizeof(u32), GFP_KERNEL)) {
1014 retval = gb_protocol_register(&loopback_protocol);
1019 debugfs_remove_recursive(gb_dev.root);
1022 module_init(loopback_init);
/* Module exit: tear down in reverse of loopback_init(). */
1024 static void __exit loopback_exit(void)
1026 debugfs_remove_recursive(gb_dev.root);
1027 kfifo_free(&gb_dev.kfifo);
1028 gb_protocol_deregister(&loopback_protocol);
1030 module_exit(loopback_exit);
1032 MODULE_LICENSE("GPL v2");