1 /*
2  * Loopback bridge driver for the Greybus loopback module.
3  *
4  * Copyright 2014 Google Inc.
5  * Copyright 2014 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/kthread.h>
14 #include <linux/delay.h>
15 #include <linux/random.h>
16 #include <linux/sizes.h>
17 #include <linux/cdev.h>
18 #include <linux/fs.h>
19 #include <linux/kfifo.h>
20 #include <linux/debugfs.h>
21 #include <linux/list_sort.h>
22
23 #include <asm/div64.h>
24
25 #include "greybus.h"
26
27 #define NSEC_PER_DAY 86400000000000ULL
28
29 struct gb_loopback_stats {
30         u32 min;
31         u32 max;
32         u64 sum;
33         u32 count;
34 };
35
36 struct gb_loopback_device {
37         struct dentry *root;
38         struct dentry *file;
39         u32 count;
40
41         struct kfifo kfifo;
42         struct mutex mutex;
43         struct list_head list;
44         wait_queue_head_t wq;
45
46         int type;
47         u32 mask;
48         u32 size;
49         u32 iteration_max;
50         u32 iteration_count;
51         size_t size_max;
52         int ms_wait;
53         u32 error;
54
55         struct timeval start;
56         struct timeval end;
57
58         /* Overall stats */
59         struct gb_loopback_stats latency;
60         struct gb_loopback_stats throughput;
61         struct gb_loopback_stats requests_per_second;
62 };
63
64 static struct gb_loopback_device gb_dev;
65
66 struct gb_loopback {
67         struct gb_connection *connection;
68
69         struct dentry *file;
70         struct kfifo kfifo_lat;
71         struct kfifo kfifo_ts;
72         struct mutex mutex;
73         struct task_struct *task;
74         struct list_head entry;
75
76         /* Per connection stats */
77         struct gb_loopback_stats latency;
78         struct gb_loopback_stats throughput;
79         struct gb_loopback_stats requests_per_second;
80
81         u32 lbid;
82         u32 iteration_count;
83         u64 elapsed_nsecs;
84         u32 error;
85 };
86
87 #define GB_LOOPBACK_FIFO_DEFAULT                        8192
88
89 static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
90 module_param(kfifo_depth, uint, 0444);
91
92 /* Maximum size of any one send data buffer we support */
93 #define MAX_PACKET_SIZE (PAGE_SIZE * 2)
94
95 #define GB_LOOPBACK_MS_WAIT_MAX                         1000
96
97 /* Sysfs attribute helpers for device-level and per-connection entries */
98 #define gb_loopback_ro_attr(field, pfx, conn)                           \
99 static ssize_t field##_##pfx##_show(struct device *dev,                 \
100                             struct device_attribute *attr,              \
101                             char *buf)                                  \
102 {                                                                       \
103         struct gb_connection *connection;                               \
104         struct gb_loopback *gb;                                         \
105         if (conn) {                                                     \
106                 connection = to_gb_connection(dev);                     \
107                 gb = connection->private;                               \
108                 return sprintf(buf, "%u\n", gb->field);                 \
109         } else {                                                        \
110                 return sprintf(buf, "%u\n", gb_dev.field);              \
111         }                                                               \
112 }                                                                       \
113 static DEVICE_ATTR_RO(field##_##pfx)
114
115 #define gb_loopback_ro_stats_attr(name, field, type, pfx, conn)         \
116 static ssize_t name##_##field##_##pfx##_show(struct device *dev,        \
117                             struct device_attribute *attr,              \
118                             char *buf)                                  \
119 {                                                                       \
120         struct gb_connection *connection;                               \
121         struct gb_loopback *gb;                                         \
122         if (conn) {                                                     \
123                 connection = to_gb_connection(dev);                     \
124                 gb = connection->private;                               \
125                 return sprintf(buf, "%"#type"\n", gb->name.field);      \
126         } else {                                                        \
127                 return sprintf(buf, "%"#type"\n", gb_dev.name.field);   \
128         }                                                               \
129 }                                                                       \
130 static DEVICE_ATTR_RO(name##_##field##_##pfx)
131
132 #define gb_loopback_ro_avg_attr(name, pfx, conn)                        \
133 static ssize_t name##_avg_##pfx##_show(struct device *dev,              \
134                             struct device_attribute *attr,              \
135                             char *buf)                                  \
136 {                                                                       \
137         struct gb_loopback_stats *stats;                                \
138         struct gb_connection *connection;                               \
139         struct gb_loopback *gb;                                         \
140         u64 avg;                                                        \
141         u32 count, rem;                                                 \
142         if (conn) {                                                     \
143                 connection = to_gb_connection(dev);                     \
144                 gb = connection->private;                               \
145                 stats = &gb->name;                                      \
146         } else {                                                        \
147                 stats = &gb_dev.name;                                   \
148         }                                                               \
149         count = stats->count ? stats->count : 1;                        \
150         avg = stats->sum + count / 2;   /* round closest */             \
151         rem = do_div(avg, count);                                       \
152         return sprintf(buf, "%llu.%06u\n", avg, 1000000 * rem / count); \
153 }                                                                       \
154 static DEVICE_ATTR_RO(name##_avg_##pfx)
155
156 #define gb_loopback_stats_attrs(field, pfx, conn)                       \
157         gb_loopback_ro_stats_attr(field, min, u, pfx, conn);            \
158         gb_loopback_ro_stats_attr(field, max, u, pfx, conn);            \
159         gb_loopback_ro_avg_attr(field, pfx, conn)
160
161 #define gb_loopback_attr(field, type)                                   \
162 static ssize_t field##_show(struct device *dev,                         \
163                             struct device_attribute *attr,              \
164                             char *buf)                                  \
165 {                                                                       \
166         struct gb_connection *connection = to_gb_connection(dev);       \
167         struct gb_loopback *gb = connection->private;                   \
168         return sprintf(buf, "%"#type"\n", gb->field);                   \
169 }                                                                       \
170 static ssize_t field##_store(struct device *dev,                        \
171                             struct device_attribute *attr,              \
172                             const char *buf,                            \
173                             size_t len)                                 \
174 {                                                                       \
175         int ret;                                                        \
176         struct gb_loopback *gb = to_gb_connection(dev)->private;        \
177         mutex_lock(&gb_dev.mutex);                                      \
178         ret = sscanf(buf, "%"#type, &gb->field);                        \
179         if (ret != 1)                                                   \
180                 len = -EINVAL;                                          \
181         else                                                            \
182                 gb_loopback_check_attr(&gb_dev, gb->connection);        \
183         mutex_unlock(&gb_dev.mutex);                                    \
184         return len;                                                     \
185 }                                                                       \
186 static DEVICE_ATTR_RW(field)
187
188 #define gb_dev_loopback_ro_attr(field, conn)                            \
189 static ssize_t field##_show(struct device *dev,         \
190                             struct device_attribute *attr,              \
191                             char *buf)                                  \
192 {                                                                       \
193         return sprintf(buf, "%u\n", gb_dev.field);                      \
194 }                                                                       \
195 static DEVICE_ATTR_RO(field)
196
197 #define gb_dev_loopback_rw_attr(field, type)                            \
198 static ssize_t field##_show(struct device *dev,                         \
199                             struct device_attribute *attr,              \
200                             char *buf)                                  \
201 {                                                                       \
202         return sprintf(buf, "%"#type"\n", gb_dev.field);                \
203 }                                                                       \
204 static ssize_t field##_store(struct device *dev,                        \
205                             struct device_attribute *attr,              \
206                             const char *buf,                            \
207                             size_t len)                                 \
208 {                                                                       \
209         int ret;                                                        \
210         struct gb_connection *connection = to_gb_connection(dev);       \
211         mutex_lock(&gb_dev.mutex);                                      \
212         ret = sscanf(buf, "%"#type, &gb_dev.field);                     \
213         if (ret != 1)                                                   \
214                 len = -EINVAL;                                          \
215         else                                                            \
216                 gb_loopback_check_attr(&gb_dev, connection);            \
217         mutex_unlock(&gb_dev.mutex);                                    \
218         return len;                                                     \
219 }                                                                       \
220 static DEVICE_ATTR_RW(field)
221
222 static void gb_loopback_reset_stats(struct gb_loopback_device *gb_dev);
223 static void gb_loopback_check_attr(struct gb_loopback_device *gb_dev,
224                                    struct gb_connection *connection)
225 {
226         struct gb_loopback *gb;
227
228         if (gb_dev->ms_wait > GB_LOOPBACK_MS_WAIT_MAX)
229                 gb_dev->ms_wait = GB_LOOPBACK_MS_WAIT_MAX;
230         if (gb_dev->size > gb_dev->size_max)
231                 gb_dev->size = gb_dev->size_max;
232         gb_dev->iteration_count = 0;
233         gb_dev->error = 0;
234
235         list_for_each_entry(gb, &gb_dev->list, entry) {
236                 mutex_lock(&gb->mutex);
237                 gb->iteration_count = 0;
238                 gb->error = 0;
239                 if (kfifo_depth < gb_dev->iteration_max) {
240                         dev_warn(&connection->dev,
241                                  "cannot log bytes %u kfifo_depth %u\n",
242                                  gb_dev->iteration_max, kfifo_depth);
243                 }
244                 kfifo_reset_out(&gb->kfifo_lat);
245                 kfifo_reset_out(&gb->kfifo_ts);
246                 mutex_unlock(&gb->mutex);
247         }
248
249         switch (gb_dev->type) {
250         case GB_LOOPBACK_TYPE_PING:
251         case GB_LOOPBACK_TYPE_TRANSFER:
252         case GB_LOOPBACK_TYPE_SINK:
253                 kfifo_reset_out(&gb_dev->kfifo);
254                 gb_loopback_reset_stats(gb_dev);
255                 wake_up(&gb_dev->wq);
256                 break;
257         default:
258                 gb_dev->type = 0;
259                 break;
260         }
261 }
262
263 /* Time to send and receive one message */
264 gb_loopback_stats_attrs(latency, dev, false);
265 gb_loopback_stats_attrs(latency, con, true);
266 /* Number of requests sent per second on this cport */
267 gb_loopback_stats_attrs(requests_per_second, dev, false);
268 gb_loopback_stats_attrs(requests_per_second, con, true);
269 /* Quantity of data sent and received on this cport */
270 gb_loopback_stats_attrs(throughput, dev, false);
271 gb_loopback_stats_attrs(throughput, con, true);
272 /* Number of errors encountered during loop */
273 gb_loopback_ro_attr(error, dev, false);
274 gb_loopback_ro_attr(error, con, true);
275
276 /*
277  * Type of loopback message to send based on protocol type definitions
278  * 0 => Don't send message
279  * 2 => Send ping message continuously (message without payload)
280  * 3 => Send transfer message continuously (message with payload,
281  *                                         payload returned in response)
282  * 4 => Send a sink message (message with payload, no payload in response)
283  */
284 gb_dev_loopback_rw_attr(type, d);
285 /* Size of transfer message payload: 0-4096 bytes */
286 gb_dev_loopback_rw_attr(size, u);
287 /* Time to wait between two messages: 0-1000 ms */
288 gb_dev_loopback_rw_attr(ms_wait, d);
289 /* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
290 gb_dev_loopback_rw_attr(iteration_max, u);
291 /* The current index of the for (i = 0; i < iteration_max; i++) loop */
292 gb_dev_loopback_ro_attr(iteration_count, false);
293 /* A bit-mask of destination connections to include in the test run */
294 gb_dev_loopback_rw_attr(mask, u);
295
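/*
 * Example of driving a loopback test from userspace through the attributes
 * above.  The sysfs locations are deliberately left unspecified here - the
 * actual directory names depend on how the endo and connection devices are
 * enumerated on a given system:
 *
 *   echo 0    > .../mask            # test every loopback connection
 *   echo 4096 > .../size            # transfer payload size in bytes
 *   echo 1000 > .../iteration_max   # stop after 1000 iterations
 *   echo 3    > .../type            # 2=ping, 3=transfer, 4=sink; starts run
 *   cat .../iteration_count         # poll for completion
 *   cat .../latency_avg_dev .../requests_per_second_avg_dev
 */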
296 static struct attribute *loopback_dev_attrs[] = {
297         &dev_attr_latency_min_dev.attr,
298         &dev_attr_latency_max_dev.attr,
299         &dev_attr_latency_avg_dev.attr,
300         &dev_attr_requests_per_second_min_dev.attr,
301         &dev_attr_requests_per_second_max_dev.attr,
302         &dev_attr_requests_per_second_avg_dev.attr,
303         &dev_attr_throughput_min_dev.attr,
304         &dev_attr_throughput_max_dev.attr,
305         &dev_attr_throughput_avg_dev.attr,
306         &dev_attr_type.attr,
307         &dev_attr_size.attr,
308         &dev_attr_ms_wait.attr,
309         &dev_attr_iteration_count.attr,
310         &dev_attr_iteration_max.attr,
311         &dev_attr_mask.attr,
312         &dev_attr_error_dev.attr,
313         NULL,
314 };
315 ATTRIBUTE_GROUPS(loopback_dev);
316
317 static struct attribute *loopback_con_attrs[] = {
318         &dev_attr_latency_min_con.attr,
319         &dev_attr_latency_max_con.attr,
320         &dev_attr_latency_avg_con.attr,
321         &dev_attr_requests_per_second_min_con.attr,
322         &dev_attr_requests_per_second_max_con.attr,
323         &dev_attr_requests_per_second_avg_con.attr,
324         &dev_attr_throughput_min_con.attr,
325         &dev_attr_throughput_max_con.attr,
326         &dev_attr_throughput_avg_con.attr,
327         &dev_attr_error_con.attr,
328         NULL,
329 };
330 ATTRIBUTE_GROUPS(loopback_con);
331
332 static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
333 {
334         u32 lat;
335
336         do_div(elapsed_nsecs, NSEC_PER_USEC);
337         lat = elapsed_nsecs;
338         return lat;
339 }
340
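/*
 * Return t2 - t1, treating the case where the end timestamp is smaller than
 * the start timestamp as a single wrap of the time source at one day
 * (NSEC_PER_DAY).
 */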
341 static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
342 {
343         if (t2 > t1)
344                 return t2 - t1;
345         else
346                 return NSEC_PER_DAY - t1 + t2;
347 }
348
349 static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
350 {
351         u64 t1, t2;
352
353         t1 = timeval_to_ns(ts);
354         t2 = timeval_to_ns(te);
355
356         return __gb_loopback_calc_latency(t1, t2);
357 }
358
359 static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
360                                         struct timeval *ts, struct timeval *te)
361 {
362         kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
363         kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
364 }
365
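/*
 * A connection takes part in the current test run when the user-supplied
 * mask is zero (all connections) or has this connection's lbid bit set.
 */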
366 static int gb_loopback_active(struct gb_loopback *gb)
367 {
368         return (gb_dev.mask == 0 || (gb_dev.mask & gb->lbid));
369 }
370
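/*
 * Send one loopback operation synchronously and record its start and end
 * timestamps.  The timestamps are pushed to the per-connection kfifo and
 * elapsed_nsecs is updated even when the operation fails, so failed
 * iterations still contribute a latency sample.
 */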
371 static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
372                                       void *request, int request_size,
373                                       void *response, int response_size)
374 {
375         struct gb_operation *operation;
376         struct timeval ts, te;
377         int ret;
378
379         do_gettimeofday(&ts);
380         operation = gb_operation_create(gb->connection, type, request_size,
381                                         response_size, GFP_KERNEL);
382         if (!operation) {
383                 ret = -ENOMEM;
384                 goto error;
385         }
386
387         if (request_size)
388                 memcpy(operation->request->payload, request, request_size);
389
390         ret = gb_operation_request_send_sync(operation);
391         if (ret) {
392                 dev_err(&gb->connection->dev,
393                         "synchronous operation failed: %d\n", ret);
394         } else {
395                 if (response_size == operation->response->payload_size) {
396                         memcpy(response, operation->response->payload,
397                                response_size);
398                 } else {
399                         dev_err(&gb->connection->dev,
400                                 "response size %zu expected %d\n",
401                                 operation->response->payload_size,
402                                 response_size);
403                 }
404         }
405         gb_operation_destroy(operation);
406
407 error:
408         do_gettimeofday(&te);
409
410         /* Calculate the total time the message took */
411         gb_loopback_push_latency_ts(gb, &ts, &te);
412         gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
413
414         return ret;
415 }
416
417 static int gb_loopback_sink(struct gb_loopback *gb, u32 len)
418 {
419         struct gb_loopback_transfer_request *request;
420         int retval;
421
422         request = kmalloc(len + sizeof(*request), GFP_KERNEL);
423         if (!request)
424                 return -ENOMEM;
425
426         request->len = cpu_to_le32(len);
427         retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
428                                             request, len + sizeof(*request),
429                                             NULL, 0);
430         kfree(request);
431         return retval;
432 }
433
434 static int gb_loopback_transfer(struct gb_loopback *gb, u32 len)
435 {
436         struct gb_loopback_transfer_request *request;
437         struct gb_loopback_transfer_response *response;
438         int retval;
439
440         request = kmalloc(len + sizeof(*request), GFP_KERNEL);
441         if (!request)
442                 return -ENOMEM;
443         response = kmalloc(len + sizeof(*response), GFP_KERNEL);
444         if (!response) {
445                 kfree(request);
446                 return -ENOMEM;
447         }
448
449         memset(request->data, 0x5A, len);
450
451         request->len = cpu_to_le32(len);
452         retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
453                                             request, len + sizeof(*request),
454                                             response, len + sizeof(*response));
455         if (retval)
456                 goto gb_error;
457
458         if (memcmp(request->data, response->data, len)) {
459                 dev_err(&gb->connection->dev, "Loopback Data doesn't match\n");
460                 retval = -EREMOTEIO;
461         }
462
463 gb_error:
464         kfree(request);
465         kfree(response);
466
467         return retval;
468 }
469
470 static int gb_loopback_ping(struct gb_loopback *gb)
471 {
472         return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
473                                           NULL, 0, NULL, 0);
474 }
475
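/*
 * Handle loopback requests initiated by the remote end.  Ping and sink
 * requests need no response payload; a transfer request is answered by
 * echoing its payload back in the response.
 */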
476 static int gb_loopback_request_recv(u8 type, struct gb_operation *operation)
477 {
478         struct gb_connection *connection = operation->connection;
479         struct gb_loopback_transfer_request *request;
480         struct gb_loopback_transfer_response *response;
481         size_t len;
482
483         /* By convention, the AP initiates the version operation */
484         switch (type) {
485         case GB_REQUEST_TYPE_PROTOCOL_VERSION:
486                 dev_err(&connection->dev,
487                         "module-initiated version operation\n");
488                 return -EINVAL;
489         case GB_LOOPBACK_TYPE_PING:
490         case GB_LOOPBACK_TYPE_SINK:
491                 return 0;
492         case GB_LOOPBACK_TYPE_TRANSFER:
493                 if (operation->request->payload_size < sizeof(*request)) {
494                         dev_err(&connection->dev,
495                                 "transfer request too small (%zu < %zu)\n",
496                                 operation->request->payload_size,
497                                 sizeof(*request));
498                         return -EINVAL; /* -EMSGSIZE */
499                 }
500                 request = operation->request->payload;
501                 len = le32_to_cpu(request->len);
502                 if (len > gb_dev.size_max) {
503                         dev_err(&connection->dev,
504                                 "transfer request too large (%zu > %zu)\n",
505                                 len, gb_dev.size_max);
506                         return -EINVAL;
507                 }
508
509                 if (len) {
510                         if (!gb_operation_response_alloc(operation,
511                                         len + sizeof(*response), GFP_KERNEL)) {
512                                 dev_err(&connection->dev,
513                                         "error allocating response\n");
514                                 return -ENOMEM;
515                         }
516                         response = operation->response->payload;
517                         response->len = cpu_to_le32(len);
518                         memcpy(response->data, request->data, len);
519                 }
520                 return 0;
521         default:
522                 dev_err(&connection->dev,
523                         "unsupported request: %hhu\n", type);
524                 return -EINVAL;
525         }
526 }
527
528 static void gb_loopback_reset_stats(struct gb_loopback_device *gb_dev)
529 {
530         struct gb_loopback_stats reset = {
531                 .min = U32_MAX,
532         };
533         struct gb_loopback *gb;
534
535         /* Reset per-connection stats */
536         list_for_each_entry(gb, &gb_dev->list, entry) {
537                 mutex_lock(&gb->mutex);
538                 memcpy(&gb->latency, &reset,
539                        sizeof(struct gb_loopback_stats));
540                 memcpy(&gb->throughput, &reset,
541                        sizeof(struct gb_loopback_stats));
542                 memcpy(&gb->requests_per_second, &reset,
543                        sizeof(struct gb_loopback_stats));
544                 mutex_unlock(&gb->mutex);
545         }
546
547         /* Reset aggregate stats */
548         memset(&gb_dev->start, 0, sizeof(struct timeval));
549         memset(&gb_dev->end, 0, sizeof(struct timeval));
550         memcpy(&gb_dev->latency, &reset, sizeof(struct gb_loopback_stats));
551         memcpy(&gb_dev->throughput, &reset, sizeof(struct gb_loopback_stats));
552         memcpy(&gb_dev->requests_per_second, &reset,
553                sizeof(struct gb_loopback_stats));
554 }
555
556 static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
557 {
558         if (stats->min > val)
559                 stats->min = val;
560         if (stats->max < val)
561                 stats->max = val;
562         stats->sum += val;
563         stats->count++;
564 }
565
566 static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
567 {
568         u64 req = USEC_PER_SEC;                 /* 64-bit for do_div() */
569
570         do_div(req, latency);
571         gb_loopback_update_stats(&gb_dev.requests_per_second, req);
572         gb_loopback_update_stats(&gb->requests_per_second, req);
573 }
574
575 static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
576 {
577         u64 throughput;                         /* 64-bit for do_div() */
578         u32 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;
579
580         switch (gb_dev.type) {
581         case GB_LOOPBACK_TYPE_PING:
582                 break;
583         case GB_LOOPBACK_TYPE_SINK:
584                 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
585                                   gb_dev.size;
586                 break;
587         case GB_LOOPBACK_TYPE_TRANSFER:
588                 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
589                                   sizeof(struct gb_loopback_transfer_response) +
590                                   gb_dev.size * 2;
591                 break;
592         default:
593                 return;
594         }
595
596         /* Calculate bytes per second */
597         throughput = USEC_PER_SEC;
598         do_div(throughput, latency);
599         throughput *= aggregate_size;
600         gb_loopback_update_stats(&gb_dev.throughput, throughput);
601         gb_loopback_update_stats(&gb->throughput, throughput);
602 }
603
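/*
 * Drain the per-connection timestamp kfifos and, for each completed
 * iteration, compute an aggregate latency: the span from the earliest
 * request start to the latest response end across all active connections.
 * Each aggregate sample (in microseconds) is pushed into the device-level
 * kfifo exposed through debugfs.
 */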
604 static int gb_loopback_calculate_aggregate_stats(void)
605 {
606         struct gb_loopback *gb;
607         struct timeval ts;
608         struct timeval te;
609         u64 t1, t2;
610         u64 ts_min;
611         u64 te_max;
612         u64 elapsed_nsecs;
613         u32 lat;
614         int i, latched;
615         int rollover = 0;
616
617         for (i = 0; i < gb_dev.iteration_max; i++) {
618                 latched = 0;
619                 ts_min = 0;
620                 te_max = 0;
621                 list_for_each_entry(gb, &gb_dev.list, entry) {
622                         if (!gb_loopback_active(gb))
623                                 continue;
624                         if (kfifo_out(&gb->kfifo_ts, &ts, sizeof(ts)) < sizeof(ts))
625                                 goto error;
626                         if (kfifo_out(&gb->kfifo_ts, &te, sizeof(te)) < sizeof(te))
627                                 goto error;
628                         t1 = timeval_to_ns(&ts);
629                         t2 = timeval_to_ns(&te);
630
631                         /* minimum timestamp is always what we want */
632                         if (latched == 0 || t1 < ts_min)
633                                 ts_min = t1;
634
635                         /* maximum timestamp needs to handle rollover */
636                         if (t2 > t1) {
637                                 if (latched == 0 || t2 > te_max)
638                                         te_max = t2;
639                         } else {
640                                 if (latched == 0 || rollover == 0)
641                                         te_max = t2;
642                                 if (rollover == 1 && t2 > te_max)
643                                         te_max = t2;
644                                 rollover = 1;
645                         }
646                         latched = 1;
647                 }
648                 /* Calculate the aggregate timestamp */
649                 elapsed_nsecs = __gb_loopback_calc_latency(ts_min, te_max);
650                 lat = gb_loopback_nsec_to_usec_latency(elapsed_nsecs);
651                 kfifo_in(&gb_dev.kfifo, (unsigned char *)&lat, sizeof(lat));
652         }
653         return 0;
654 error:
655         kfifo_reset_out(&gb_dev.kfifo);
656         return -ENOMEM;
657 }
658
659 static void gb_loopback_calculate_stats(struct gb_loopback *gb)
660 {
661         u32 lat;
662
663         /* Express latency in terms of microseconds */
664         lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);
665
666         /* Log latency statistic */
667         gb_loopback_update_stats(&gb_dev.latency, lat);
668         gb_loopback_update_stats(&gb->latency, lat);
669
670         /* Raw latency log on a per thread basis */
671         kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));
672
673         /* Log throughput and requests using latency as benchmark */
674         gb_loopback_throughput_update(gb, lat);
675         gb_loopback_requests_update(gb, lat);
676 }
677
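/*
 * Per-connection worker thread.  It sleeps until a test type is selected,
 * then repeatedly issues ping/transfer/sink operations, updating the
 * per-connection and aggregate statistics after each one.  The device-wide
 * iteration_count tracks the lowest count reached by any active thread;
 * once it reaches iteration_max the aggregate stats are computed and the
 * test type is cleared to end the run.
 */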
678 static int gb_loopback_fn(void *data)
679 {
680         int error = 0;
681         int ms_wait = 0;
682         int type;
683         u32 size;
684         u32 low_count;
685         struct gb_loopback *gb = data;
686         struct gb_loopback *gb_list;
687
688         while (1) {
689                 if (!gb_dev.type)
690                         wait_event_interruptible(gb_dev.wq, gb_dev.type ||
691                                                  kthread_should_stop());
692                 if (kthread_should_stop())
693                         break;
694
695                 mutex_lock(&gb_dev.mutex);
696                 if (!gb_loopback_active(gb)) {
697                         ms_wait = 100;
698                         goto unlock_continue;
699                 }
700                 if (gb_dev.iteration_max) {
701                         /* Determine overall lowest count */
702                         low_count = gb->iteration_count;
703                         list_for_each_entry(gb_list, &gb_dev.list, entry) {
704                                 if (!gb_loopback_active(gb_list))
705                                         continue;
706                                 if (gb_list->iteration_count < low_count)
707                                         low_count = gb_list->iteration_count;
708                         }
709                         /* All threads achieved at least low_count iterations */
710                         if (gb_dev.iteration_count < low_count) {
711                                 gb_dev.iteration_count = low_count;
712                                 sysfs_notify(&gb->connection->dev.kobj, NULL,
713                                              "iteration_count");
714                         }
715                         /* Optionally terminate */
716                         if (gb_dev.iteration_count == gb_dev.iteration_max) {
717                                 gb_loopback_calculate_aggregate_stats();
718                                 gb_dev.type = 0;
719                                 goto unlock_continue;
720                         }
721                 }
722                 size = gb_dev.size;
723                 ms_wait = gb_dev.ms_wait;
724                 type = gb_dev.type;
725                 mutex_unlock(&gb_dev.mutex);
726
727                 mutex_lock(&gb->mutex);
728                 if (gb->iteration_count >= gb_dev.iteration_max) {
729                         /* If this thread finished before siblings then sleep */
730                         ms_wait = 1;
731                         mutex_unlock(&gb->mutex);
732                         goto sleep;
733                 }
734                 /* Otherwise perform the requested operation */
735                 if (type == GB_LOOPBACK_TYPE_PING)
736                         error = gb_loopback_ping(gb);
737                 else if (type == GB_LOOPBACK_TYPE_TRANSFER)
738                         error = gb_loopback_transfer(gb, size);
739                 else if (type == GB_LOOPBACK_TYPE_SINK)
740                         error = gb_loopback_sink(gb, size);
741                 mutex_unlock(&gb->mutex);
742
743                 mutex_lock(&gb_dev.mutex);
744                 mutex_lock(&gb->mutex);
745
746                 if (error) {
747                         gb_dev.error++;
748                         gb->error++;
749                 }
750                 gb_loopback_calculate_stats(gb);
751                 gb->iteration_count++;
752
753                 mutex_unlock(&gb->mutex);
754 unlock_continue:
755                 mutex_unlock(&gb_dev.mutex);
756 sleep:
757                 if (ms_wait)
758                         msleep(ms_wait);
759         }
760         return 0;
761 }
762
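/*
 * Emit one raw latency sample per open of the debugfs file: pop a single
 * u32 value (in microseconds) from the given kfifo, or return -EAGAIN if
 * the fifo is currently empty.
 */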
763 static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
764                                                  struct kfifo *kfifo,
765                                                  struct mutex *mutex)
766 {
767         u32 latency;
768         int retval;
769
770         if (kfifo_len(kfifo) == 0) {
771                 retval = -EAGAIN;
772                 goto done;
773         }
774
775         mutex_lock(mutex);
776         retval = kfifo_out(kfifo, &latency, sizeof(latency));
777         if (retval > 0) {
778                 seq_printf(s, "%u", latency);
779                 retval = 0;
780         }
781         mutex_unlock(mutex);
782 done:
783         return retval;
784 }
785
786 static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
787 {
788         struct gb_loopback *gb = s->private;
789
790         return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
791                                                      &gb->mutex);
792 }
793
794 static int gb_loopback_latency_open(struct inode *inode, struct file *file)
795 {
796         return single_open(file, gb_loopback_dbgfs_latency_show,
797                            inode->i_private);
798 }
799
800 static const struct file_operations gb_loopback_debugfs_latency_ops = {
801         .open           = gb_loopback_latency_open,
802         .read           = seq_read,
803         .llseek         = seq_lseek,
804         .release        = single_release,
805 };
806
807 static int gb_loopback_dbgfs_dev_latency_show(struct seq_file *s, void *unused)
808 {
809         struct gb_loopback_device *gb_dev = s->private;
810
811         return gb_loopback_dbgfs_latency_show_common(s, &gb_dev->kfifo,
812                                                      &gb_dev->mutex);
813 }
814
815 static int gb_loopback_dev_latency_open(struct inode *inode, struct file *file)
816 {
817         return single_open(file, gb_loopback_dbgfs_dev_latency_show,
818                            inode->i_private);
819 }
820
821 static const struct file_operations gb_loopback_debugfs_dev_latency_ops = {
822         .open           = gb_loopback_dev_latency_open,
823         .read           = seq_read,
824         .llseek         = seq_lseek,
825         .release        = single_release,
826 };
827
828 static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
829                                       struct list_head *lhb)
830 {
831         struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
832         struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
833         struct gb_connection *ca = a->connection;
834         struct gb_connection *cb = b->connection;
835
836         if (ca->bundle->intf->module->module_id <
837             cb->bundle->intf->module->module_id)
838                 return -1;
839         if (cb->bundle->intf->module->module_id <
840             ca->bundle->intf->module->module_id)
841                 return 1;
842         if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
843                 return -1;
844         if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
845                 return 1;
846         if (ca->bundle->id < cb->bundle->id)
847                 return -1;
848         if (cb->bundle->id < ca->bundle->id)
849                 return 1;
850         if (ca->intf_cport_id < cb->intf_cport_id)
851                 return -1;
852         else if (cb->intf_cport_id < ca->intf_cport_id)
853                 return 1;
854
855         return 0;
856 }
857
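/*
 * Give every loopback connection a unique one-hot lbid, ordered by its
 * position on the bus (module, interface, bundle, cport).  The "mask"
 * attribute selects connections by OR-ing these bits together.
 */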
858 static void gb_loopback_insert_id(struct gb_loopback *gb)
859 {
860         struct gb_loopback *gb_list;
861         u32 new_lbid = 0;
862
863         /* perform an insertion sort */
864         list_add_tail(&gb->entry, &gb_dev.list);
865         list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
866         list_for_each_entry(gb_list, &gb_dev.list, entry) {
867                 gb_list->lbid = 1 << new_lbid;
868                 new_lbid++;
869         }
870 }
871
872 #define DEBUGFS_NAMELEN 32
873
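/*
 * Connection setup: the first connection creates the device-wide debugfs
 * entry and sysfs group and derives the maximum payload size from the
 * operation layer; every connection then gets its own raw-latency debugfs
 * file, sysfs group, latency/timestamp kfifos and a dedicated worker
 * thread.
 */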
874 static int gb_loopback_connection_init(struct gb_connection *connection)
875 {
876         struct gb_loopback *gb;
877         int retval;
878         char name[DEBUGFS_NAMELEN];
879         struct kobject *kobj = &connection->hd->endo->dev.kobj;
880
881         gb = kzalloc(sizeof(*gb), GFP_KERNEL);
882         if (!gb)
883                 return -ENOMEM;
884         gb_loopback_reset_stats(&gb_dev);
885
886         /* If this is the first connection, create the module-level endo0:x entries */
887         mutex_lock(&gb_dev.mutex);
888         if (!gb_dev.count) {
889                 snprintf(name, sizeof(name), "raw_latency_endo0:%d",
890                          connection->bundle->intf->module->module_id);
891                 gb_dev.file = debugfs_create_file(name, S_IFREG | S_IRUGO,
892                                                   gb_dev.root, &gb_dev,
893                                   &gb_loopback_debugfs_dev_latency_ops);
894                 retval = sysfs_create_groups(kobj, loopback_dev_groups);
895                 if (retval)
896                         goto out_sysfs;
897
898                 /* Calculate maximum payload */
899                 gb_dev.size_max = gb_operation_get_payload_size_max(connection);
900                 if (gb_dev.size_max <=
901                         sizeof(struct gb_loopback_transfer_request)) {
902                         retval = -EINVAL;
903                         goto out_sysfs_dev;
904                 }
905                 gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
906         }
907
908         /* Create per-connection sysfs and debugfs data-points */
909         snprintf(name, sizeof(name), "raw_latency_%s",
910                  dev_name(&connection->dev));
911         gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
912                                        &gb_loopback_debugfs_latency_ops);
913         gb->connection = connection;
914         connection->private = gb;
915         retval = sysfs_create_groups(&connection->dev.kobj,
916                                      loopback_con_groups);
917         if (retval)
918                 goto out_sysfs_dev;
919
920         /* Allocate kfifo */
921         if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
922                           GFP_KERNEL)) {
923                 retval = -ENOMEM;
924                 goto out_sysfs_conn;
925         }
926         if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
927                           GFP_KERNEL)) {
928                 retval = -ENOMEM;
929                 goto out_kfifo0;
930         }
931
932         /* Fork worker thread */
933         mutex_init(&gb->mutex);
934         gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
935         if (IS_ERR(gb->task)) {
936                 retval = PTR_ERR(gb->task);
937                 goto out_kfifo1;
938         }
939
940         gb_loopback_insert_id(gb);
941         gb_dev.count++;
942         mutex_unlock(&gb_dev.mutex);
943         return 0;
944
945 out_kfifo1:
946         kfifo_free(&gb->kfifo_ts);
947 out_kfifo0:
948         kfifo_free(&gb->kfifo_lat);
949 out_sysfs_conn:
950         sysfs_remove_groups(&connection->dev.kobj, loopback_con_groups);
951 out_sysfs_dev:
952         if (!gb_dev.count) {
953                 sysfs_remove_groups(kobj, loopback_dev_groups);
954                 debugfs_remove(gb_dev.file);
955         }
956         debugfs_remove(gb->file);
957         connection->private = NULL;
958 out_sysfs:
959         mutex_unlock(&gb_dev.mutex);
960         kfree(gb);
961
962         return retval;
963 }
964
965 static void gb_loopback_connection_exit(struct gb_connection *connection)
966 {
967         struct gb_loopback *gb = connection->private;
968         struct kobject *kobj = &connection->hd->endo->dev.kobj;
969
970         if (!IS_ERR_OR_NULL(gb->task))
971                 kthread_stop(gb->task);
972
973         mutex_lock(&gb_dev.mutex);
974
975         connection->private = NULL;
976         kfifo_free(&gb->kfifo_lat);
977         kfifo_free(&gb->kfifo_ts);
978         gb_dev.count--;
979         if (!gb_dev.count) {
980                 sysfs_remove_groups(kobj, loopback_dev_groups);
981                 debugfs_remove(gb_dev.file);
982         }
983         sysfs_remove_groups(&connection->dev.kobj, loopback_con_groups);
984         debugfs_remove(gb->file);
985         list_del(&gb->entry);
986         mutex_unlock(&gb_dev.mutex);
987         kfree(gb);
988 }
989
990 static struct gb_protocol loopback_protocol = {
991         .name                   = "loopback",
992         .id                     = GREYBUS_PROTOCOL_LOOPBACK,
993         .major                  = GB_LOOPBACK_VERSION_MAJOR,
994         .minor                  = GB_LOOPBACK_VERSION_MINOR,
995         .connection_init        = gb_loopback_connection_init,
996         .connection_exit        = gb_loopback_connection_exit,
997         .request_recv           = gb_loopback_request_recv,
998 };
999
1000 static int __init loopback_init(void)
1001 {
1002         int retval;
1003
1004         init_waitqueue_head(&gb_dev.wq);
1005         INIT_LIST_HEAD(&gb_dev.list);
1006         mutex_init(&gb_dev.mutex);
1007         gb_dev.root = debugfs_create_dir("gb_loopback", NULL);
1008
1009         if (kfifo_alloc(&gb_dev.kfifo, kfifo_depth * sizeof(u32), GFP_KERNEL)) {
1010                 retval = -ENOMEM;
1011                 goto error_debugfs;
1012         }
1013
1014         retval = gb_protocol_register(&loopback_protocol);
1015         if (!retval)
1016                 return retval;
1017         kfifo_free(&gb_dev.kfifo);
1018 error_debugfs:
1019         debugfs_remove_recursive(gb_dev.root);
1020         return retval;
1021 }
1022 module_init(loopback_init);
1023
1024 static void __exit loopback_exit(void)
1025 {
1026         debugfs_remove_recursive(gb_dev.root);
1027         kfifo_free(&gb_dev.kfifo);
1028         gb_protocol_deregister(&loopback_protocol);
1029 }
1030 module_exit(loopback_exit);
1031
1032 MODULE_LICENSE("GPL v2");