2 * Loopback bridge driver for the Greybus loopback module.
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
7 * Released under the GPLv2 only.
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/kthread.h>
14 #include <linux/delay.h>
15 #include <linux/random.h>
16 #include <linux/sizes.h>
17 #include <linux/cdev.h>
19 #include <linux/kfifo.h>
20 #include <linux/debugfs.h>
22 #include <asm/div64.h>
26 #define NSEC_PER_DAY 86400000000000ULL
/* Min/max accumulator for one metric. NOTE(review): the member list is
 * elided in this extract — presumably min/max/sum/count; confirm against
 * the full source. */
28 struct gb_loopback_stats {
/* Driver-wide (aggregate) state shared by every loopback connection.
 * NOTE(review): this extract shows only a subset of the members (e.g.
 * mutex, wq, kfifo, type, size, ms_wait, iteration_* are referenced
 * elsewhere but not visible here). */
35 struct gb_loopback_device {
42 struct list_head list;
/* Aggregate statistics folded in from all active connections. */
58 struct gb_loopback_stats latency;
59 struct gb_loopback_stats latency_gb;
60 struct gb_loopback_stats throughput;
61 struct gb_loopback_stats requests_per_second;
/* Single global instance — one loopback device for the whole module. */
64 static struct gb_loopback_device gb_dev;
/* Per-connection loopback state. NOTE(review): the opening
 * "struct gb_loopback {" line is missing from this extract, as are
 * several members (mutex, lbid, iteration_count, elapsed_nsecs*). */
67 struct gb_connection *connection;
/* Raw per-iteration latency samples (u32 usec) and send/recv timestamp
 * pairs (struct timeval), drained by debugfs and aggregate-stats code. */
70 struct kfifo kfifo_lat;
71 struct kfifo kfifo_ts;
/* Worker thread running gb_loopback_fn() for this connection. */
73 struct task_struct *task;
74 struct list_head entry;
76 /* Per connection stats */
77 struct gb_loopback_stats latency;
78 struct gb_loopback_stats latency_gb;
79 struct gb_loopback_stats throughput;
80 struct gb_loopback_stats requests_per_second;
89 #define GB_LOOPBACK_FIFO_DEFAULT 8192
/* Depth (in elements) of the per-connection log kfifos; read-only
 * module parameter (0444) so it can only be set at load time. */
91 static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
92 module_param(kfifo_depth, uint, 0444);
94 /* Maximum size of any one send data buffer we support */
95 #define MAX_PACKET_SIZE (PAGE_SIZE * 2)
/* Upper clamp for the ms_wait attribute (inter-message delay in ms). */
97 #define GB_LOOPBACK_MS_WAIT_MAX 1000
99 /* interface sysfs attributes */
/*
 * Generate a read-only sysfs attribute <field>_<pfx> for a plain u32
 * counter. The 'conn' flag selects which branch of the (elided) body is
 * compiled: per-connection (gb->field, looked up via the device's
 * connection->private) or device-wide aggregate (gb_dev.field).
 * NOTE(review): the if/else framing lines are missing from this extract.
 * No comments may be placed inside the macro body — the backslash
 * continuations would be broken.
 */
100 #define gb_loopback_ro_attr(field, pfx, conn) \
101 static ssize_t field##_##pfx##_show(struct device *dev, \
102 struct device_attribute *attr, \
105 struct gb_connection *connection; \
106 struct gb_loopback *gb; \
108 connection = to_gb_connection(dev); \
109 gb = connection->private; \
110 return sprintf(buf, "%u\n", gb->field); \
112 return sprintf(buf, "%u\n", gb_dev.field); \
115 static DEVICE_ATTR_RO(field##_##pfx)
/*
 * Generate a read-only sysfs attribute <name>_<field>_<pfx> exposing one
 * member of a gb_loopback_stats struct (e.g. latency_min_con). 'type' is
 * the printf length/format token stringified into the format ("u", "llu"
 * etc.); 'conn' selects per-connection vs aggregate source as above.
 */
117 #define gb_loopback_ro_stats_attr(name, field, type, pfx, conn) \
118 static ssize_t name##_##field##_##pfx##_show(struct device *dev, \
119 struct device_attribute *attr, \
122 struct gb_connection *connection; \
123 struct gb_loopback *gb; \
125 connection = to_gb_connection(dev); \
126 gb = connection->private; \
127 return sprintf(buf, "%"#type"\n", gb->name.field); \
129 return sprintf(buf, "%"#type"\n", gb_dev.name.field); \
132 static DEVICE_ATTR_RO(name##_##field##_##pfx)
/*
 * Generate a read-only <name>_avg_<pfx> attribute reporting the running
 * average of a stats struct as "sum/count" with 6 fractional digits.
 * count is forced to at least 1 to avoid dividing by zero, sum is biased
 * by count/2 before do_div() so the integer part rounds to closest, and
 * the remainder is scaled to microsecond-style precision.
 * NOTE(review): the avg/count/rem declarations and the aggregate-vs-
 * connection branch lines are missing from this extract.
 */
134 #define gb_loopback_ro_avg_attr(name, pfx, conn) \
135 static ssize_t name##_avg_##pfx##_show(struct device *dev, \
136 struct device_attribute *attr, \
139 struct gb_loopback_stats *stats; \
140 struct gb_connection *connection; \
141 struct gb_loopback *gb; \
145 connection = to_gb_connection(dev); \
146 gb = connection->private; \
149 stats = &gb_dev.name; \
151 count = stats->count ? stats->count : 1; \
152 avg = stats->sum + count / 2; /* round closest */ \
153 rem = do_div(avg, count); \
154 return sprintf(buf, "%llu.%06u\n", avg, 1000000 * rem / count); \
156 static DEVICE_ATTR_RO(name##_avg_##pfx)
/* Convenience wrapper: emit min, max and avg attributes for one metric. */
158 #define gb_loopback_stats_attrs(field, pfx, conn) \
159 gb_loopback_ro_stats_attr(field, min, u, pfx, conn); \
160 gb_loopback_ro_stats_attr(field, max, u, pfx, conn); \
161 gb_loopback_ro_avg_attr(field, pfx, conn)
/*
 * Generate a read/write per-connection attribute. The store path takes
 * the global mutex, parses the value with sscanf, then revalidates all
 * attributes via gb_loopback_check_attr(). NOTE(review): the store
 * path's local declarations (ret, gb) and the sscanf error check are
 * missing from this extract — 'gb' is used at line 180 but its lookup
 * is not visible here; confirm against the full source.
 */
163 #define gb_loopback_attr(field, type) \
164 static ssize_t field##_show(struct device *dev, \
165 struct device_attribute *attr, \
168 struct gb_connection *connection = to_gb_connection(dev); \
169 struct gb_loopback *gb = connection->private; \
170 return sprintf(buf, "%"#type"\n", gb->field); \
172 static ssize_t field##_store(struct device *dev, \
173 struct device_attribute *attr, \
178 struct gb_connection *connection = to_gb_connection(dev); \
179 mutex_lock(&gb_dev.mutex); \
180 ret = sscanf(buf, "%"#type, &gb->field); \
184 gb_loopback_check_attr(connection); \
185 mutex_unlock(&gb_dev.mutex); \
188 static DEVICE_ATTR_RW(field)
/* Read-only attribute backed directly by a u32 field of the global gb_dev. */
190 #define gb_dev_loopback_ro_attr(field, conn) \
191 static ssize_t field##_show(struct device *dev, \
192 struct device_attribute *attr, \
195 return sprintf(buf, "%u\n", gb_dev.field); \
197 static DEVICE_ATTR_RO(field)
/*
 * Read/write attribute backed by a field of the global gb_dev. Store
 * serializes on gb_dev.mutex and re-runs gb_loopback_check_attr() so a
 * write to any knob (type, size, ms_wait, ...) revalidates and restarts
 * the test. NOTE(review): the 'ret' declaration and sscanf error check
 * are missing from this extract.
 */
199 #define gb_dev_loopback_rw_attr(field, type) \
200 static ssize_t field##_show(struct device *dev, \
201 struct device_attribute *attr, \
204 return sprintf(buf, "%"#type"\n", gb_dev.field); \
206 static ssize_t field##_store(struct device *dev, \
207 struct device_attribute *attr, \
212 struct gb_connection *connection = to_gb_connection(dev); \
213 mutex_lock(&gb_dev.mutex); \
214 ret = sscanf(buf, "%"#type, &gb_dev.field); \
218 gb_loopback_check_attr(&gb_dev, connection); \
219 mutex_unlock(&gb_dev.mutex); \
222 static DEVICE_ATTR_RW(field)
224 static void gb_loopback_reset_stats(struct gb_loopback_device *gb_dev);
/*
 * Revalidate user-set attributes after a sysfs store and restart the
 * test run: clamp ms_wait and size to their maxima, zero all iteration
 * counters, drain every per-connection kfifo, and — for a valid test
 * type — reset aggregate state and wake the worker threads.
 * Called with gb_dev->mutex held (taken by the *_store macros).
 * NOTE(review): several framing lines (braces, a "mask &= 0xff"-style
 * line, default case) are missing from this extract.
 */
225 static void gb_loopback_check_attr(struct gb_loopback_device *gb_dev,
226 struct gb_connection *connection)
228 struct gb_loopback *gb;
/* Clamp the delay and payload size to supported ranges. */
230 if (gb_dev->ms_wait > GB_LOOPBACK_MS_WAIT_MAX)
231 gb_dev->ms_wait = GB_LOOPBACK_MS_WAIT_MAX;
232 if (gb_dev->size > gb_dev->size_max)
233 gb_dev->size = gb_dev->size_max;
234 gb_dev->iteration_count = 0;
/* Restart every connection: zero its counter and drain its logs. */
237 list_for_each_entry(gb, &gb_dev->list, entry) {
238 mutex_lock(&gb->mutex);
239 gb->iteration_count = 0;
/* Warn when the requested iteration count exceeds log capacity. */
241 if (kfifo_depth < gb_dev->iteration_max) {
242 dev_warn(&connection->dev,
243 "cannot log bytes %u kfifo_depth %u\n",
244 gb_dev->iteration_max, kfifo_depth);
246 kfifo_reset_out(&gb->kfifo_lat);
247 kfifo_reset_out(&gb->kfifo_ts);
248 mutex_unlock(&gb->mutex);
/* Only a recognized test type arms a new run and wakes the workers. */
251 switch (gb_dev->type) {
252 case GB_LOOPBACK_TYPE_PING:
253 case GB_LOOPBACK_TYPE_TRANSFER:
254 case GB_LOOPBACK_TYPE_SINK:
255 kfifo_reset_out(&gb_dev->kfifo);
256 gb_loopback_reset_stats(gb_dev);
257 wake_up(&gb_dev->wq);
/*
 * Attribute instantiations. Each metric is emitted twice: "dev" suffix
 * for the device-wide aggregate and "con" suffix for per-connection
 * values (the boolean selects the branch inside the generator macros).
 */
265 /* Time to send and receive one message */
266 gb_loopback_stats_attrs(latency, dev, false);
267 gb_loopback_stats_attrs(latency, con, true);
268 /* Time to send and receive one message not including greybus */
269 gb_loopback_stats_attrs(latency_gb, dev, false);
270 gb_loopback_stats_attrs(latency_gb, con, true);
271 /* Number of requests sent per second on this cport */
272 gb_loopback_stats_attrs(requests_per_second, dev, false);
273 gb_loopback_stats_attrs(requests_per_second, con, true);
274 /* Quantity of data sent and received on this cport */
275 gb_loopback_stats_attrs(throughput, dev, false);
276 gb_loopback_stats_attrs(throughput, con, true);
277 /* Number of errors encountered during loop */
278 gb_loopback_ro_attr(error, dev, false);
279 gb_loopback_ro_attr(error, con, true);
282 * Type of loopback message to send based on protocol type definitions
283 * 0 => Don't send message
284 * 2 => Send ping message continuously (message without payload)
285 * 3 => Send transfer message continuously (message with payload,
286 * payload returned in response)
287 * 4 => Send a sink message (message with payload, no payload in response)
289 gb_dev_loopback_rw_attr(type, d);
290 /* Size of transfer message payload: 0-4096 bytes */
291 gb_dev_loopback_rw_attr(size, u);
292 /* Time to wait between two messages: 0-1000 ms */
293 gb_dev_loopback_rw_attr(ms_wait, d);
294 /* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
295 gb_dev_loopback_rw_attr(iteration_max, u);
296 /* The current index of the for (i = 0; i < iteration_max; i++) loop */
297 gb_dev_loopback_ro_attr(iteration_count, false);
298 /* A bit-mask of destination connecitons to include in the test run */
299 gb_dev_loopback_rw_attr(mask, u);
/*
 * Device-wide sysfs group, registered on the module kobject at
 * connection init. NOTE(review): entries such as type/size/mask that
 * appear in the upstream array are missing lines in this extract.
 */
301 static struct attribute *loopback_dev_attrs[] = {
302 &dev_attr_latency_min_dev.attr,
303 &dev_attr_latency_max_dev.attr,
304 &dev_attr_latency_avg_dev.attr,
305 &dev_attr_latency_gb_min_dev.attr,
306 &dev_attr_latency_gb_max_dev.attr,
307 &dev_attr_latency_gb_avg_dev.attr,
308 &dev_attr_requests_per_second_min_dev.attr,
309 &dev_attr_requests_per_second_max_dev.attr,
310 &dev_attr_requests_per_second_avg_dev.attr,
311 &dev_attr_throughput_min_dev.attr,
312 &dev_attr_throughput_max_dev.attr,
313 &dev_attr_throughput_avg_dev.attr,
316 &dev_attr_ms_wait.attr,
317 &dev_attr_iteration_count.attr,
318 &dev_attr_iteration_max.attr,
320 &dev_attr_error_dev.attr,
323 ATTRIBUTE_GROUPS(loopback_dev);
/* Per-connection sysfs group, registered on each connection's kobject. */
325 static struct attribute *loopback_con_attrs[] = {
326 &dev_attr_latency_min_con.attr,
327 &dev_attr_latency_max_con.attr,
328 &dev_attr_latency_avg_con.attr,
329 &dev_attr_latency_gb_min_con.attr,
330 &dev_attr_latency_gb_max_con.attr,
331 &dev_attr_latency_gb_avg_con.attr,
332 &dev_attr_requests_per_second_min_con.attr,
333 &dev_attr_requests_per_second_max_con.attr,
334 &dev_attr_requests_per_second_avg_con.attr,
335 &dev_attr_throughput_min_con.attr,
336 &dev_attr_throughput_max_con.attr,
337 &dev_attr_throughput_avg_con.attr,
338 &dev_attr_error_con.attr,
341 ATTRIBUTE_GROUPS(loopback_con);
/* Convert a nanosecond latency to microseconds (64-bit safe via do_div).
 * NOTE(review): the return statement is missing from this extract. */
343 static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
347 do_div(elapsed_nsecs, NSEC_PER_USEC);
/* Latency between two ns timestamps. Only the day-rollover branch
 * (t2 wrapped past midnight, so add a full day) is visible here; the
 * normal t2 > t1 fast path is a missing line — confirm upstream. */
352 static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
357 return NSEC_PER_DAY - t2 + t1;
/* timeval wrapper around __gb_loopback_calc_latency(). */
360 static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
364 t1 = timeval_to_ns(ts);
365 t2 = timeval_to_ns(te);
367 return __gb_loopback_calc_latency(t1, t2);
/* Log the raw send/receive timestamp pair into the per-connection fifo
 * for later aggregate-latency calculation. */
370 static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
371 struct timeval *ts, struct timeval *te)
373 kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
374 kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
/* A connection participates when the user mask is 0 (all) or its
 * lbid bit is set in the mask. */
377 static int gb_loopback_active(struct gb_loopback *gb)
379 return (gb_dev.mask == 0 || (gb_dev.mask & gb->lbid));
/*
 * Send one sink operation: request carries a len-byte payload, the
 * response carries none. Timestamps the operation for total latency and
 * pops the connection-layer timestamps for the greybus-only component.
 * Returns the gb_operation_sync() result. NOTE(review): the kmalloc
 * NULL check, payload memset and kfree are missing lines in this
 * extract — confirm against the full source.
 */
382 static int gb_loopback_sink(struct gb_loopback *gb, u32 len)
384 struct timeval ts, te;
385 struct gb_loopback_transfer_request *request;
388 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
392 request->len = cpu_to_le32(len);
394 do_gettimeofday(&ts);
395 retval = gb_operation_sync(gb->connection, GB_LOOPBACK_TYPE_SINK,
396 request, len + sizeof(*request), NULL, 0);
398 do_gettimeofday(&te);
400 /* Calculate the total time the message took */
401 gb_loopback_push_latency_ts(gb, &ts, &te);
402 gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
404 /* Calculate non-greybus related component of the latency */
405 gb_connection_pop_timestamp(gb->connection, &ts);
406 gb_connection_pop_timestamp(gb->connection, &te);
407 gb->elapsed_nsecs_gb = gb_loopback_calc_latency(&ts, &te);
/*
 * Send one transfer operation: a len-byte 0x5A pattern is sent and the
 * far end echoes it back; the echo is memcmp-verified. Records total
 * and greybus-only latency exactly like gb_loopback_sink(). Returns the
 * operation result (error path on data mismatch elided here).
 * NOTE(review): the kmalloc NULL checks, retval error bail-out and the
 * kfree cleanup labels are missing lines in this extract.
 */
414 static int gb_loopback_transfer(struct gb_loopback *gb, u32 len)
416 struct timeval ts, te;
417 struct gb_loopback_transfer_request *request;
418 struct gb_loopback_transfer_response *response;
421 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
424 response = kmalloc(len + sizeof(*response), GFP_KERNEL);
/* Known pattern so the echoed payload can be verified below. */
430 memset(request->data, 0x5A, len);
432 request->len = cpu_to_le32(len);
434 do_gettimeofday(&ts);
435 retval = gb_operation_sync(gb->connection, GB_LOOPBACK_TYPE_TRANSFER,
436 request, len + sizeof(*request),
437 response, len + sizeof(*response));
438 do_gettimeofday(&te);
440 /* Calculate the total time the message took */
441 gb_loopback_push_latency_ts(gb, &ts, &te);
442 gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
444 /* Calculate non-greybus related component of the latency */
445 gb_connection_pop_timestamp(gb->connection, &ts);
446 gb_connection_pop_timestamp(gb->connection, &te);
447 gb->elapsed_nsecs_gb = gb_loopback_calc_latency(&ts, &te);
/* Round-trip integrity check on the echoed payload. */
452 if (memcmp(request->data, response->data, len)) {
453 dev_err(&gb->connection->dev, "Loopback Data doesn't match\n");
/*
 * Send one ping operation (no payload in either direction) and record
 * total and greybus-only latency. Returns the gb_operation_sync()
 * result. NOTE(review): the NULL,0/NULL,0 argument line of the sync
 * call and the final return are missing lines in this extract.
 */
464 static int gb_loopback_ping(struct gb_loopback *gb)
466 struct timeval ts, te;
469 do_gettimeofday(&ts);
470 retval = gb_operation_sync(gb->connection, GB_LOOPBACK_TYPE_PING,
472 do_gettimeofday(&te);
474 /* Calculate the total time the message took */
475 gb_loopback_push_latency_ts(gb, &ts, &te);
476 gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
478 /* Calculate non-greybus related component of the latency */
479 gb_connection_pop_timestamp(gb->connection, &ts);
480 gb_connection_pop_timestamp(gb->connection, &te);
481 gb->elapsed_nsecs_gb = gb_loopback_calc_latency(&ts, &te);
/*
 * Incoming-request handler (the peer driving us): rejects version
 * requests (AP-initiated only), accepts ping/sink with no work, and for
 * transfer validates the payload length against both the header size
 * and gb_dev.size_max before allocating and echoing the payload back.
 * Returns 0 on success, -EINVAL/-ENOMEM style negatives on failure.
 * NOTE(review): the switch(type) line, several returns and the len
 * declaration are missing lines in this extract.
 */
486 static int gb_loopback_request_recv(u8 type, struct gb_operation *operation)
488 struct gb_connection *connection = operation->connection;
489 struct gb_loopback_transfer_request *request;
490 struct gb_loopback_transfer_response *response;
493 /* By convention, the AP initiates the version operation */
495 case GB_REQUEST_TYPE_PROTOCOL_VERSION:
496 dev_err(&connection->dev,
497 "module-initiated version operation\n");
499 case GB_LOOPBACK_TYPE_PING:
500 case GB_LOOPBACK_TYPE_SINK:
502 case GB_LOOPBACK_TYPE_TRANSFER:
/* Request must at least contain the transfer header. */
503 if (operation->request->payload_size < sizeof(*request)) {
504 dev_err(&connection->dev,
505 "transfer request too small (%zu < %zu)\n",
506 operation->request->payload_size,
508 return -EINVAL; /* -EMSGSIZE */
510 request = operation->request->payload;
511 len = le32_to_cpu(request->len);
/* Bound the echo length by what one operation can carry. */
512 if (len > gb_dev.size_max) {
513 dev_err(&connection->dev,
514 "transfer request too large (%zu > %zu)\n",
515 len, gb_dev.size_max);
520 if (!gb_operation_response_alloc(operation, len,
522 dev_err(&connection->dev,
523 "error allocating response\n");
/* Echo the payload back to the sender. */
526 response = operation->response->payload;
527 response->len = cpu_to_le32(len);
528 memcpy(response->data, request->data, len);
532 dev_err(&connection->dev,
533 "unsupported request: %hhu\n", type);
/*
 * Zero all statistics: each connection's four stats structs (under its
 * mutex) and then the aggregate stats plus start/end timestamps in
 * gb_dev. The 'reset' initializer presumably sets min to a sentinel
 * maximum (its initializer lines are missing from this extract —
 * confirm upstream). Called with gb_dev's mutex held via
 * gb_loopback_check_attr().
 */
538 static void gb_loopback_reset_stats(struct gb_loopback_device *gb_dev)
540 struct gb_loopback_stats reset = {
543 struct gb_loopback *gb;
545 /* Reset per-connection stats */
546 list_for_each_entry(gb, &gb_dev->list, entry) {
547 mutex_lock(&gb->mutex);
548 memcpy(&gb->latency, &reset,
549 sizeof(struct gb_loopback_stats));
550 memcpy(&gb->latency_gb, &reset,
551 sizeof(struct gb_loopback_stats));
552 memcpy(&gb->throughput, &reset,
553 sizeof(struct gb_loopback_stats));
554 memcpy(&gb->requests_per_second, &reset,
555 sizeof(struct gb_loopback_stats));
556 mutex_unlock(&gb->mutex);
559 /* Reset aggregate stats */
560 memset(&gb_dev->start, 0, sizeof(struct timeval));
561 memset(&gb_dev->end, 0, sizeof(struct timeval));
562 memcpy(&gb_dev->latency, &reset, sizeof(struct gb_loopback_stats));
563 memcpy(&gb_dev->latency_gb, &reset, sizeof(struct gb_loopback_stats));
564 memcpy(&gb_dev->throughput, &reset, sizeof(struct gb_loopback_stats));
565 memcpy(&gb_dev->requests_per_second, &reset,
566 sizeof(struct gb_loopback_stats));
/* Fold one sample into a stats accumulator: track min and max (the
 * sum/count update lines are missing from this extract). */
569 static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
571 if (stats->min > val)
573 if (stats->max < val)
/* Derive requests/second from one latency sample (usec) and fold it
 * into both the aggregate and per-connection stats. NOTE(review): a
 * latency==0 guard is not visible here — do_div by zero would be fatal;
 * confirm the full source handles it. */
579 static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
581 u32 req = USEC_PER_SEC;
583 do_div(req, latency);
584 gb_loopback_update_stats(&gb_dev.requests_per_second, req);
585 gb_loopback_update_stats(&gb->requests_per_second, req);
/*
 * Derive bytes/second from one latency sample. aggregate_size starts at
 * the two operation message headers and, per test type, adds the
 * request/response payload structures (the "+ size" payload terms are
 * missing lines in this extract). Folds the result into aggregate and
 * per-connection throughput stats.
 */
588 static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
591 u32 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;
593 switch (gb_dev.type) {
594 case GB_LOOPBACK_TYPE_PING:
596 case GB_LOOPBACK_TYPE_SINK:
597 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
600 case GB_LOOPBACK_TYPE_TRANSFER:
601 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
602 sizeof(struct gb_loopback_transfer_response) +
609 /* Calculate bytes per second */
610 throughput = USEC_PER_SEC;
611 do_div(throughput, latency);
612 throughput *= aggregate_size;
613 gb_loopback_update_stats(&gb_dev.throughput, throughput);
614 gb_loopback_update_stats(&gb->throughput, throughput);
/*
 * After a full run: for each iteration, pop the send/receive timestamp
 * pair from every active connection's kfifo_ts, find the earliest send
 * (ts_min) and latest receive (te_max, with day-rollover handling), and
 * push the resulting aggregate latency into gb_dev.kfifo for the
 * device-level debugfs log. On a short read the fifos are drained and
 * an error is returned. NOTE(review): local declarations, the latched/
 * rollover bookkeeping assignments and the error/return lines are
 * missing from this extract.
 */
617 static int gb_loopback_calculate_aggregate_stats(void)
619 struct gb_loopback *gb;
630 for (i = 0; i < gb_dev.iteration_max; i++) {
634 list_for_each_entry(gb, &gb_dev.list, entry) {
635 if (!gb_loopback_active(gb))
636 /* Bail out (to fifo cleanup) if a timestamp pair is incomplete. */
637 if (kfifo_out(&gb->kfifo_ts, &ts, sizeof(ts)) < sizeof(ts))
639 if (kfifo_out(&gb->kfifo_ts, &te, sizeof(te)) < sizeof(te))
641 t1 = timeval_to_ns(&ts);
642 t2 = timeval_to_ns(&te);
644 /* minimum timestamp is always what we want */
645 if (latched == 0 || t1 < ts_min)
648 /* maximum timestamp needs to handle rollover */
650 if (latched == 0 || t2 > te_max)
653 if (latched == 0 || rollover == 0)
655 if (rollover == 1 && t2 > te_max)
661 /* Calculate the aggregate timestamp */
662 elapsed_nsecs = __gb_loopback_calc_latency(ts_min, te_max);
663 lat = gb_loopback_nsec_to_usec_latency(elapsed_nsecs);
664 kfifo_in(&gb_dev.kfifo, (unsigned char *)&lat, sizeof(lat));
668 kfifo_reset_out(&gb_dev.kfifo);
/*
 * Fold one completed operation into all statistics: convert the total
 * elapsed time to usec, update latency stats, log the raw sample into
 * kfifo_lat, derive throughput and requests/sec, then compute the
 * greybus-only latency (total minus connection-layer time) and update
 * the latency_gb stats. NOTE(review): the line converting 'tmp' to
 * usec before the latency_gb update is missing from this extract.
 */
672 static void gb_loopback_calculate_stats(struct gb_loopback *gb)
677 /* Express latency in terms of microseconds */
678 lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);
680 /* Log latency stastic */
681 gb_loopback_update_stats(&gb_dev.latency, lat);
682 gb_loopback_update_stats(&gb->latency, lat);
684 /* Raw latency log on a per thread basis */
685 kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));
687 /* Log throughput and requests using latency as benchmark */
688 gb_loopback_throughput_update(gb, lat);
689 gb_loopback_requests_update(gb, lat);
691 /* Calculate the greybus related latency number in nanoseconds */
692 tmp = gb->elapsed_nsecs - gb->elapsed_nsecs_gb;
694 gb_loopback_update_stats(&gb_dev.latency_gb, lat);
695 gb_loopback_update_stats(&gb->latency_gb, lat);
/*
 * Per-connection worker thread. Loop: sleep until a test type is set or
 * the thread is asked to stop; under gb_dev.mutex check whether this
 * connection is in the active mask and advance the device-wide
 * iteration_count to the minimum count achieved by all active threads
 * (notifying sysfs and, at iteration_max, computing aggregate stats);
 * snapshot the run parameters; then under gb->mutex perform one
 * ping/transfer/sink operation; finally record stats under both locks.
 * NOTE(review): many lines are missing from this extract — local
 * declarations (type, size, ms_wait, error, low_count), the
 * sysfs_notify attribute name, the "type = 0" termination, the error
 * accounting, the msleep(ms_wait), and the loop framing. Confirm lock
 * ordering (gb_dev.mutex then gb->mutex) against the full source.
 */
698 static int gb_loopback_fn(void *data)
705 struct gb_loopback *gb = data;
706 struct gb_loopback *gb_list;
/* Park until the user arms a test (type != 0) or we are stopped. */
710 wait_event_interruptible(gb_dev.wq, gb_dev.type ||
711 kthread_should_stop());
712 if (kthread_should_stop())
715 mutex_lock(&gb_dev.mutex);
716 if (!gb_loopback_active(gb))
717 goto unlock_continue;
718 if (gb_dev.iteration_max) {
719 /* Determine overall lowest count */
720 low_count = gb->iteration_count;
721 list_for_each_entry(gb_list, &gb_dev.list, entry) {
722 if (!gb_loopback_active(gb_list))
724 if (gb_list->iteration_count < low_count)
725 low_count = gb_list->iteration_count;
727 /* All threads achieved at least low_count iterations */
728 if (gb_dev.iteration_count < low_count) {
729 gb_dev.iteration_count = low_count;
730 sysfs_notify(&gb->connection->dev.kobj, NULL,
733 /* Optionally terminate */
734 if (gb_dev.iteration_count == gb_dev.iteration_max) {
735 gb_loopback_calculate_aggregate_stats();
737 goto unlock_continue;
/* Snapshot run parameters while holding the device mutex. */
741 ms_wait = gb_dev.ms_wait;
743 mutex_unlock(&gb_dev.mutex);
745 mutex_lock(&gb->mutex);
746 if (gb->iteration_count >= gb_dev.iteration_max) {
747 /* If this thread finished before siblings then sleep */
749 mutex_unlock(&gb->mutex);
752 /* Else operations to perform */
753 if (type == GB_LOOPBACK_TYPE_PING)
754 error = gb_loopback_ping(gb);
755 else if (type == GB_LOOPBACK_TYPE_TRANSFER)
756 error = gb_loopback_transfer(gb, size);
757 else if (type == GB_LOOPBACK_TYPE_SINK)
758 error = gb_loopback_sink(gb, size);
759 mutex_unlock(&gb->mutex);
/* Record the iteration's stats under both locks. */
761 mutex_lock(&gb_dev.mutex);
762 mutex_lock(&gb->mutex);
768 gb_loopback_calculate_stats(gb);
769 gb->iteration_count++;
771 mutex_unlock(&gb->mutex);
773 mutex_unlock(&gb_dev.mutex);
/*
 * debugfs support: a common dumper that drains u32 latency samples from
 * a kfifo and prints them, plus single_open() wrappers for the
 * per-connection (kfifo_lat) and device-aggregate (gb_dev.kfifo) files.
 * NOTE(review): the loop framing, mutex parameter, .read/.llseek fops
 * members and several declarations are missing lines in this extract.
 */
781 static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
/* Empty fifo: nothing to print (early-out path; body elided). */
788 if (kfifo_len(kfifo) == 0) {
794 retval = kfifo_out(kfifo, &latency, sizeof(latency));
796 seq_printf(s, "%u", latency);
/* Per-connection latency file: drains gb->kfifo_lat. */
804 static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
806 struct gb_loopback *gb = s->private;
808 return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
812 static int gb_loopback_latency_open(struct inode *inode, struct file *file)
814 return single_open(file, gb_loopback_dbgfs_latency_show,
818 static const struct file_operations gb_loopback_debugfs_latency_ops = {
819 .open = gb_loopback_latency_open,
822 .release = single_release,
/* Device-aggregate latency file: drains gb_dev.kfifo. */
825 static int gb_loopback_dbgfs_dev_latency_show(struct seq_file *s, void *unused)
827 struct gb_loopback_device *gb_dev = s->private;
829 return gb_loopback_dbgfs_latency_show_common(s, &gb_dev->kfifo,
833 static int gb_loopback_dev_latency_open(struct inode *inode, struct file *file)
835 return single_open(file, gb_loopback_dbgfs_dev_latency_show,
839 static const struct file_operations gb_loopback_debugfs_dev_latency_ops = {
840 .open = gb_loopback_dev_latency_open,
843 .release = single_release,
846 #define DEBUGFS_NAMELEN 32
/*
 * Protocol connection-init callback. Allocates per-connection state;
 * on the first connection also creates the module-level debugfs file
 * and device-wide sysfs group and computes size_max from the
 * operation-layer maximum payload; then creates the per-connection
 * debugfs file and sysfs group, allocates the two log kfifos, and
 * starts the worker kthread. Cleanup is goto-based in reverse
 * acquisition order. NOTE(review): the gb_dev.count checks, several
 * error labels, retval checks and the final "return retval" lines are
 * missing from this extract — the visible cleanup tail (L484-L491) is
 * the label bodies without their label names.
 */
848 static int gb_loopback_connection_init(struct gb_connection *connection)
850 struct gb_loopback *gb;
852 char name[DEBUGFS_NAMELEN];
853 struct kobject *kobj = &connection->bundle->intf->module->dev.kobj;
855 gb = kzalloc(sizeof(*gb), GFP_KERNEL);
858 gb_loopback_reset_stats(&gb_dev);
860 /* If this is the first connection - create a module endo0:x entries */
861 mutex_lock(&gb_dev.mutex);
863 snprintf(name, sizeof(name), "raw_latency_endo0:%d",
864 connection->bundle->intf->module->module_id);
865 gb_dev.file = debugfs_create_file(name, S_IFREG | S_IRUGO,
866 gb_dev.root, &gb_dev,
867 &gb_loopback_debugfs_dev_latency_ops);
868 retval = sysfs_create_groups(kobj, loopback_dev_groups);
872 /* Calculate maximum payload */
873 gb_dev.size_max = gb_operation_get_payload_size_max(connection);
874 if (gb_dev.size_max <=
875 sizeof(struct gb_loopback_transfer_request)) {
/* size_max is the usable payload after the transfer header. */
879 gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
882 /* Create per-connection sysfs and debugfs data-points */
883 snprintf(name, sizeof(name), "raw_latency_endo0:%d:%d:%d:%d",
884 connection->bundle->intf->module->module_id,
885 connection->bundle->intf->interface_id,
886 connection->bundle->id,
887 connection->intf_cport_id);
888 gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
889 &gb_loopback_debugfs_latency_ops);
890 gb->connection = connection;
891 connection->private = gb;
892 retval = sysfs_create_groups(&connection->dev.kobj,
893 loopback_con_groups);
/* Raw-sample logs: one u32 latency and one timestamp pair per iteration. */
898 if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
903 if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
909 /* Fork worker thread */
910 mutex_init(&gb->mutex);
/* Unique mask bit identifying this connection in gb_dev.mask. */
911 gb->lbid = 1 << gb_dev.count;
912 gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
913 if (IS_ERR(gb->task)) {
914 retval = PTR_ERR(gb->task);
918 list_add_tail(&gb->entry, &gb_dev.list);
920 mutex_unlock(&gb_dev.mutex);
/* Error unwind (label names elided in this extract): free resources in
 * reverse order of acquisition. */
924 kfifo_free(&gb->kfifo_ts);
926 kfifo_free(&gb->kfifo_lat);
928 sysfs_remove_groups(&connection->dev.kobj, loopback_con_groups);
931 sysfs_remove_groups(kobj, loopback_dev_groups);
932 debugfs_remove(gb_dev.file);
934 debugfs_remove(gb->file);
935 connection->private = NULL;
937 mutex_unlock(&gb_dev.mutex);
/*
 * Protocol connection-exit callback: stop the worker thread, then under
 * gb_dev.mutex detach and free the per-connection state; when the last
 * connection goes away (count check elided in this extract) also tear
 * down the device-wide sysfs group and debugfs file. NOTE(review): the
 * gb_dev.count decrement and the final kfree(gb) are missing lines —
 * confirm against the full source.
 */
943 static void gb_loopback_connection_exit(struct gb_connection *connection)
945 struct gb_loopback *gb = connection->private;
946 struct kobject *kobj = &connection->bundle->intf->module->dev.kobj;
/* task may hold an ERR_PTR if kthread_run() failed during init. */
948 if (!IS_ERR_OR_NULL(gb->task))
949 kthread_stop(gb->task);
951 mutex_lock(&gb_dev.mutex);
953 connection->private = NULL;
954 kfifo_free(&gb->kfifo_lat);
955 kfifo_free(&gb->kfifo_ts);
958 sysfs_remove_groups(kobj, loopback_dev_groups);
959 debugfs_remove(gb_dev.file);
961 sysfs_remove_groups(&connection->dev.kobj, loopback_con_groups);
962 debugfs_remove(gb->file);
963 list_del(&gb->entry);
964 mutex_unlock(&gb_dev.mutex);
/* Greybus protocol registration: binds loopback connections to the
 * init/exit/request handlers above. (The .name member line is missing
 * from this extract.) */
968 static struct gb_protocol loopback_protocol = {
970 .id = GREYBUS_PROTOCOL_LOOPBACK,
971 .major = GB_LOOPBACK_VERSION_MAJOR,
972 .minor = GB_LOOPBACK_VERSION_MINOR,
973 .connection_init = gb_loopback_connection_init,
974 .connection_exit = gb_loopback_connection_exit,
975 .request_recv = gb_loopback_request_recv,
/*
 * Module init: set up the global device state (waitqueue, connection
 * list, mutex, debugfs root, aggregate kfifo) and register the
 * protocol; on registration failure the debugfs tree is removed.
 * NOTE(review): the __init annotation, kfifo-failure error path and
 * return statements are missing lines in this extract.
 */
978 static int loopback_init(void)
982 init_waitqueue_head(&gb_dev.wq);
983 INIT_LIST_HEAD(&gb_dev.list);
984 mutex_init(&gb_dev.mutex);
985 gb_dev.root = debugfs_create_dir("gb_loopback", NULL);
987 if (kfifo_alloc(&gb_dev.kfifo, kfifo_depth * sizeof(u32), GFP_KERNEL)) {
992 retval = gb_protocol_register(&loopback_protocol);
/* Error path: undo debugfs setup if protocol registration failed. */
997 debugfs_remove_recursive(gb_dev.root);
1000 module_init(loopback_init);
/* Module exit: tear down in reverse of loopback_init() — remove debugfs,
 * free the aggregate kfifo, deregister the protocol. */
1002 static void __exit loopback_exit(void)
1004 debugfs_remove_recursive(gb_dev.root);
1005 kfifo_free(&gb_dev.kfifo);
1006 gb_protocol_deregister(&loopback_protocol);
1008 module_exit(loopback_exit);
1010 MODULE_LICENSE("GPL v2");