]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/dma/dmatest.c
dmatest: run test via debugfs
[karo-tx-linux.git] / drivers / dma / dmatest.c
1 /*
2  * DMA Engine test module
3  *
4  * Copyright (C) 2007 Atmel Corporation
5  * Copyright (C) 2013 Intel Corporation
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmaengine.h>
14 #include <linux/freezer.h>
15 #include <linux/init.h>
16 #include <linux/kthread.h>
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
19 #include <linux/random.h>
20 #include <linux/slab.h>
21 #include <linux/wait.h>
22 #include <linux/ctype.h>
23 #include <linux/debugfs.h>
24 #include <linux/uaccess.h>
25 #include <linux/seq_file.h>
26
/* Module parameters: read-only via sysfs (S_IRUGO), set at module load. */

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

/*
 * NOTE(review): 'timeout' is declared int (so -1 can mean "infinite") but is
 * registered with module_param(..., uint, ...).  The parser will treat user
 * input as unsigned -- confirm whether this should be
 * module_param(timeout, int, S_IRUGO).
 */
static int timeout = 3000;
module_param(timeout, uint, S_IRUGO);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

/*
 * Initialization patterns. All bytes in the source buffer has bit 7
 * set, all bytes in the destination buffer has bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
struct dmatest_info;

/* Per-kthread test context: one instance per (channel, op-type, index). */
struct dmatest_thread {
	struct list_head	node;	/* link in dmatest_chan.threads */
	struct dmatest_info	*info;	/* back-pointer to global test state */
	struct task_struct	*task;	/* kthread running dmatest_func() */
	struct dma_chan		*chan;	/* channel under test */
	u8			**srcs;	/* NULL-terminated array of src buffers */
	u8			**dsts;	/* NULL-terminated array of dst buffers */
	enum dma_transaction_type type;	/* DMA_MEMCPY, DMA_XOR or DMA_PQ */
};

/* A channel under test together with the threads exercising it. */
struct dmatest_chan {
	struct list_head	node;	/* link in dmatest_info.channels */
	struct dma_chan		*chan;	/* the claimed DMA channel */
	struct list_head	threads;	/* list of dmatest_thread */
};

/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[20];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @lock:		access protection to the fields of this structure
 */
struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;	/* list of dmatest_chan */
	unsigned int		nr_channels;	/* entries in @channels */
	struct mutex		lock;

	/* debugfs related stuff */
	struct dentry		*root;		/* debugfs directory */
	struct dmatest_params	dbgfs_params;	/* staged params; copied to
						 * @params when a run starts */
};

/* Single global test-state instance. */
static struct dmatest_info test_info;
148
149 static bool dmatest_match_channel(struct dmatest_params *params,
150                 struct dma_chan *chan)
151 {
152         if (params->channel[0] == '\0')
153                 return true;
154         return strcmp(dma_chan_name(chan), params->channel) == 0;
155 }
156
157 static bool dmatest_match_device(struct dmatest_params *params,
158                 struct dma_device *device)
159 {
160         if (params->device[0] == '\0')
161                 return true;
162         return strcmp(dev_name(device->dev), params->device) == 0;
163 }
164
/* Return a random unsigned long drawn from the kernel entropy pool. */
static unsigned long dmatest_random(void)
{
	unsigned long val;

	get_random_bytes(&val, sizeof(val));

	return val;
}
172
173 static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
174                 unsigned int buf_size)
175 {
176         unsigned int i;
177         u8 *buf;
178
179         for (; (buf = *bufs); bufs++) {
180                 for (i = 0; i < start; i++)
181                         buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
182                 for ( ; i < start + len; i++)
183                         buf[i] = PATTERN_SRC | PATTERN_COPY
184                                 | (~i & PATTERN_COUNT_MASK);
185                 for ( ; i < buf_size; i++)
186                         buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
187                 buf++;
188         }
189 }
190
191 static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
192                 unsigned int buf_size)
193 {
194         unsigned int i;
195         u8 *buf;
196
197         for (; (buf = *bufs); bufs++) {
198                 for (i = 0; i < start; i++)
199                         buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
200                 for ( ; i < start + len; i++)
201                         buf[i] = PATTERN_DST | PATTERN_OVERWRITE
202                                 | (~i & PATTERN_COUNT_MASK);
203                 for ( ; i < buf_size; i++)
204                         buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
205         }
206 }
207
208 static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
209                 unsigned int counter, bool is_srcbuf)
210 {
211         u8              diff = actual ^ pattern;
212         u8              expected = pattern | (~counter & PATTERN_COUNT_MASK);
213         const char      *thread_name = current->comm;
214
215         if (is_srcbuf)
216                 pr_warning("%s: srcbuf[0x%x] overwritten!"
217                                 " Expected %02x, got %02x\n",
218                                 thread_name, index, expected, actual);
219         else if ((pattern & PATTERN_COPY)
220                         && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
221                 pr_warning("%s: dstbuf[0x%x] not copied!"
222                                 " Expected %02x, got %02x\n",
223                                 thread_name, index, expected, actual);
224         else if (diff & PATTERN_SRC)
225                 pr_warning("%s: dstbuf[0x%x] was copied!"
226                                 " Expected %02x, got %02x\n",
227                                 thread_name, index, expected, actual);
228         else
229                 pr_warning("%s: dstbuf[0x%x] mismatch!"
230                                 " Expected %02x, got %02x\n",
231                                 thread_name, index, expected, actual);
232 }
233
234 static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
235                 unsigned int end, unsigned int counter, u8 pattern,
236                 bool is_srcbuf)
237 {
238         unsigned int i;
239         unsigned int error_count = 0;
240         u8 actual;
241         u8 expected;
242         u8 *buf;
243         unsigned int counter_orig = counter;
244
245         for (; (buf = *bufs); bufs++) {
246                 counter = counter_orig;
247                 for (i = start; i < end; i++) {
248                         actual = buf[i];
249                         expected = pattern | (~counter & PATTERN_COUNT_MASK);
250                         if (actual != expected) {
251                                 if (error_count < 32)
252                                         dmatest_mismatch(actual, pattern, i,
253                                                          counter, is_srcbuf);
254                                 error_count++;
255                         }
256                         counter++;
257                 }
258         }
259
260         if (error_count > 32)
261                 pr_warning("%s: %u errors suppressed\n",
262                         current->comm, error_count - 32);
263
264         return error_count;
265 }
266
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;	/* set true by dmatest_callback() */
	wait_queue_head_t	*wait;	/* queue the test thread sleeps on */
};

/*
 * DMA completion callback: mark the transfer finished and wake the
 * test thread waiting in dmatest_func().
 */
static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}
280
281 static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
282                              unsigned int count)
283 {
284         while (count--)
285                 dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
286 }
287
288 static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
289                              unsigned int count)
290 {
291         while (count--)
292                 dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
293 }
294
/*
 * Return the smaller of @x and @y, rounded down to the nearest odd
 * value.  (If the minimum is even, one less is returned.)
 */
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = x < y ? x : y;

	return (val & 1) ? val : val - 1;
}
301
302 /*
303  * This function repeatedly tests DMA transfers of various lengths and
304  * offsets for a given operation type until it is told to exit by
305  * kthread_stop(). There may be multiple threads running this function
306  * in parallel for a single channel, and there may be multiple channels
307  * being tested in parallel.
308  *
309  * Before each test, the source and destination buffer is initialized
310  * with a known pattern. This pattern is different depending on
311  * whether it's in an area which is supposed to be copied or
312  * overwritten, and different in the source and destination buffers.
313  * So if the DMA engine doesn't copy exactly what we tell it to copy,
314  * we'll notice.
315  */
static int dmatest_func(void *data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread	*thread = data;
	struct dmatest_done	done = { .wait = &done_wait };
	struct dmatest_info	*info;
	struct dmatest_params	*params;
	struct dma_chan		*chan;
	struct dma_device	*dev;
	const char		*thread_name;
	unsigned int		src_off, dst_off, len;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
	int			src_cnt;
	int			dst_cnt;
	int			i;

	thread_name = current->comm;
	set_freezable();

	/* default error until buffers are set up; cleared before main loop */
	ret = -ENOMEM;

	/* pairs with the smp_wmb() in dmatest_add_threads() that publishes
	 * thread->info/chan/type before kthread_run() */
	smp_rmb();
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	if (thread->type == DMA_MEMCPY)
		src_cnt = dst_cnt = 1;
	else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;	/* P and Q destinations */

		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		/* coefficient 1 for every source -> plain parity */
		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	/* NULL-terminated array of source buffers (kcalloc zeroes it) */
	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
	for (i = 0; i < src_cnt; i++) {
		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->srcs[i])
			goto err_srcbuf;
	}
	thread->srcs[i] = NULL;

	/* NULL-terminated array of destination buffers */
	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
	for (i = 0; i < dst_cnt; i++) {
		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->dsts[i])
			goto err_dstbuf;
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src buffers are freed by the DMAEngine code with dma_unmap_single()
	 * dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
	      | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;

	/* main test loop: run until stopped, or until the configured
	 * iteration count (0 = infinite) is reached */
	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		/* NOTE: VLAs on the kernel stack, sized by src_cnt/dst_cnt */
		dma_addr_t dma_srcs[src_cnt];
		dma_addr_t dma_dsts[dst_cnt];
		u8 align = 0;

		total_tests++;

		/* honor alignment restrictions */
		if (thread->type == DMA_MEMCPY)
			align = dev->copy_align;
		else if (thread->type == DMA_XOR)
			align = dev->xor_align;
		else if (thread->type == DMA_PQ)
			align = dev->pq_align;

		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
			break;
		}

		/* pick a random aligned length and offsets within the buffer */
		len = dmatest_random() % params->buf_size + 1;
		len = (len >> align) << align;
		if (!len)
			len = 1 << align;
		src_off = dmatest_random() % (params->buf_size - len + 1);
		dst_off = dmatest_random() % (params->buf_size - len + 1);

		src_off = (src_off >> align) << align;
		dst_off = (dst_off >> align) << align;

		dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
		dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);

		for (i = 0; i < src_cnt; i++) {
			u8 *buf = thread->srcs[i] + src_off;

			dma_srcs[i] = dma_map_single(dev->dev, buf, len,
						     DMA_TO_DEVICE);
			ret = dma_mapping_error(dev->dev, dma_srcs[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, i);
				pr_warn("%s: #%u: mapping error %d with "
					"src_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1, ret,
					src_off, len);
				failed_tests++;
				/*
				 * NOTE(review): this continues the *mapping*
				 * for-loop, not the outer test loop -- the
				 * failed iteration still falls through to the
				 * prep/submit path below with a bad
				 * dma_srcs[i].  Looks like a bug; confirm.
				 */
				continue;
			}
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		for (i = 0; i < dst_cnt; i++) {
			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
						     params->buf_size,
						     DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dma_dsts[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, src_cnt);
				unmap_dst(dev->dev, dma_dsts, params->buf_size,
					  i);
				pr_warn("%s: #%u: mapping error %d with "
					"dst_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1, ret,
					dst_off, params->buf_size);
				failed_tests++;
				/*
				 * NOTE(review): same issue as above -- this
				 * continues the inner mapping loop, not the
				 * outer test loop.  Confirm.
				 */
				continue;
			}
		}

		/* build the descriptor for the operation under test */
		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dma_dsts[0] + dst_off,
							 dma_srcs[0], len,
							 flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dma_dsts[0] + dst_off,
						      dma_srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dma_dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			unmap_src(dev->dev, dma_srcs, len, src_cnt);
			unmap_dst(dev->dev, dma_dsts, params->buf_size,
				  dst_cnt);
			pr_warning("%s: #%u: prep error with src_off=0x%x "
					"dst_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1,
					src_off, dst_off, len);
			/* back off briefly before retrying */
			msleep(100);
			failed_tests++;
			continue;
		}

		done.done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			pr_warning("%s: #%u: submit error %d with src_off=0x%x "
					"dst_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1, cookie,
					src_off, dst_off, len);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		/* freezable wait so the thread doesn't block suspend */
		wait_event_freezable_timeout(done_wait,
					     done.done || kthread_should_stop(),
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done.done) {
			/*
			 * We're leaving the timed out dma operation with
			 * dangling pointer to done_wait.  To make this
			 * correct, we'll need to allocate wait_done for
			 * each test iteration and perform "who's gonna
			 * free it this time?" dancing.  For now, just
			 * leave it dangling.
			 */
			pr_warning("%s: #%u: test timed out\n",
				   thread_name, total_tests - 1);
			failed_tests++;
			continue;
		} else if (status != DMA_SUCCESS) {
			pr_warning("%s: #%u: got completion callback,"
				   " but status is \'%s\'\n",
				   thread_name, total_tests - 1,
				   status == DMA_ERROR ? "error" : "in progress");
			failed_tests++;
			continue;
		}

		/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
		unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);

		error_count = 0;

		/* sources must be untouched outside the copied window */
		pr_debug("%s: verifying source buffer...\n", thread_name);
		error_count += dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				params->buf_size, src_off + len,
				PATTERN_SRC, true);

		/* destinations must carry the src pattern only in the window */
		pr_debug("%s: verifying dest buffer...\n",
				thread->task->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false);
		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false);
		error_count += dmatest_verify(thread->dsts, dst_off + len,
				params->buf_size, dst_off + len,
				PATTERN_DST, false);

		if (error_count) {
			pr_warning("%s: #%u: %u errors with "
				"src_off=0x%x dst_off=0x%x len=0x%x\n",
				thread_name, total_tests - 1, error_count,
				src_off, dst_off, len);
			failed_tests++;
		} else {
			pr_debug("%s: #%u: No errors with "
				"src_off=0x%x dst_off=0x%x len=0x%x\n",
				thread_name, total_tests - 1,
				src_off, dst_off, len);
		}
	}

	/* normal exit path falls through the error labels to free everything */
	ret = 0;
	for (i = 0; thread->dsts[i]; i++)
		kfree(thread->dsts[i]);
err_dstbuf:
	/*
	 * NOTE(review): a goto to err_dstbuf (partial dst allocation failure)
	 * frees only the pointer array, not the dst buffers already
	 * allocated -- possible leak; confirm.
	 */
	kfree(thread->dsts);
err_dsts:
	for (i = 0; thread->srcs[i]; i++)
		kfree(thread->srcs[i]);
err_srcbuf:
	/*
	 * NOTE(review): same concern as err_dstbuf -- a goto here skips
	 * freeing individual src buffers already allocated.
	 */
	kfree(thread->srcs);
err_srcs:
	kfree(pq_coefs);	/* kfree(NULL) is a no-op */
err_thread_type:
	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
			thread_name, total_tests, failed_tests, ret);

	/* terminate all transfers on specified channels */
	if (ret)
		dmaengine_terminate_all(chan);

	/*
	 * With a finite iteration count, park here until kthread_stop() so
	 * the thread (and its exit status) stays available to the cleanup
	 * path.  NOTE(review): interruptible_sleep_on() is a deprecated,
	 * racy API -- confirm whether a wait_event variant should be used.
	 */
	if (params->iterations > 0)
		while (!kthread_should_stop()) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
			interruptible_sleep_on(&wait_dmatest_exit);
		}

	return ret;
}
614
615 static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
616 {
617         struct dmatest_thread   *thread;
618         struct dmatest_thread   *_thread;
619         int                     ret;
620
621         list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
622                 ret = kthread_stop(thread->task);
623                 pr_debug("dmatest: thread %s exited with status %d\n",
624                                 thread->task->comm, ret);
625                 list_del(&thread->node);
626                 kfree(thread);
627         }
628
629         /* terminate all transfers on specified channels */
630         dmaengine_terminate_all(dtc->chan);
631
632         kfree(dtc);
633 }
634
635 static int dmatest_add_threads(struct dmatest_info *info,
636                 struct dmatest_chan *dtc, enum dma_transaction_type type)
637 {
638         struct dmatest_params *params = &info->params;
639         struct dmatest_thread *thread;
640         struct dma_chan *chan = dtc->chan;
641         char *op;
642         unsigned int i;
643
644         if (type == DMA_MEMCPY)
645                 op = "copy";
646         else if (type == DMA_XOR)
647                 op = "xor";
648         else if (type == DMA_PQ)
649                 op = "pq";
650         else
651                 return -EINVAL;
652
653         for (i = 0; i < params->threads_per_chan; i++) {
654                 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
655                 if (!thread) {
656                         pr_warning("dmatest: No memory for %s-%s%u\n",
657                                    dma_chan_name(chan), op, i);
658
659                         break;
660                 }
661                 thread->info = info;
662                 thread->chan = dtc->chan;
663                 thread->type = type;
664                 smp_wmb();
665                 thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
666                                 dma_chan_name(chan), op, i);
667                 if (IS_ERR(thread->task)) {
668                         pr_warning("dmatest: Failed to run thread %s-%s%u\n",
669                                         dma_chan_name(chan), op, i);
670                         kfree(thread);
671                         break;
672                 }
673
674                 /* srcbuf and dstbuf are allocated by the thread itself */
675
676                 list_add_tail(&thread->node, &dtc->threads);
677         }
678
679         return i;
680 }
681
682 static int dmatest_add_channel(struct dmatest_info *info,
683                 struct dma_chan *chan)
684 {
685         struct dmatest_chan     *dtc;
686         struct dma_device       *dma_dev = chan->device;
687         unsigned int            thread_count = 0;
688         int cnt;
689
690         dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
691         if (!dtc) {
692                 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
693                 return -ENOMEM;
694         }
695
696         dtc->chan = chan;
697         INIT_LIST_HEAD(&dtc->threads);
698
699         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
700                 cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
701                 thread_count += cnt > 0 ? cnt : 0;
702         }
703         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
704                 cnt = dmatest_add_threads(info, dtc, DMA_XOR);
705                 thread_count += cnt > 0 ? cnt : 0;
706         }
707         if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
708                 cnt = dmatest_add_threads(info, dtc, DMA_PQ);
709                 thread_count += cnt > 0 ? cnt : 0;
710         }
711
712         pr_info("dmatest: Started %u threads using %s\n",
713                 thread_count, dma_chan_name(chan));
714
715         list_add_tail(&dtc->node, &info->channels);
716         info->nr_channels++;
717
718         return 0;
719 }
720
721 static bool filter(struct dma_chan *chan, void *param)
722 {
723         struct dmatest_params *params = param;
724
725         if (!dmatest_match_channel(params, chan) ||
726             !dmatest_match_device(params, chan->device))
727                 return false;
728         else
729                 return true;
730 }
731
732 static int __run_threaded_test(struct dmatest_info *info)
733 {
734         dma_cap_mask_t mask;
735         struct dma_chan *chan;
736         struct dmatest_params *params = &info->params;
737         int err = 0;
738
739         dma_cap_zero(mask);
740         dma_cap_set(DMA_MEMCPY, mask);
741         for (;;) {
742                 chan = dma_request_channel(mask, filter, params);
743                 if (chan) {
744                         err = dmatest_add_channel(info, chan);
745                         if (err) {
746                                 dma_release_channel(chan);
747                                 break; /* add_channel failed, punt */
748                         }
749                 } else
750                         break; /* no more channels available */
751                 if (params->max_channels &&
752                     info->nr_channels >= params->max_channels)
753                         break; /* we have all we need */
754         }
755         return err;
756 }
757
758 #ifndef MODULE
759 static int run_threaded_test(struct dmatest_info *info)
760 {
761         int ret;
762
763         mutex_lock(&info->lock);
764         ret = __run_threaded_test(info);
765         mutex_unlock(&info->lock);
766         return ret;
767 }
768 #endif
769
770 static void __stop_threaded_test(struct dmatest_info *info)
771 {
772         struct dmatest_chan *dtc, *_dtc;
773         struct dma_chan *chan;
774
775         list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
776                 list_del(&dtc->node);
777                 chan = dtc->chan;
778                 dmatest_cleanup_channel(dtc);
779                 pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
780                 dma_release_channel(chan);
781         }
782
783         info->nr_channels = 0;
784 }
785
/* Locked wrapper around __stop_threaded_test() for external callers. */
static void stop_threaded_test(struct dmatest_info *info)
{
	mutex_lock(&info->lock);
	__stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
792
793 static int __restart_threaded_test(struct dmatest_info *info, bool run)
794 {
795         struct dmatest_params *params = &info->params;
796         int ret;
797
798         /* Stop any running test first */
799         __stop_threaded_test(info);
800
801         if (run == false)
802                 return 0;
803
804         /* Copy test parameters */
805         memcpy(params, &info->dbgfs_params, sizeof(*params));
806
807         /* Run test with new parameters */
808         ret = __run_threaded_test(info);
809         if (ret) {
810                 __stop_threaded_test(info);
811                 pr_err("dmatest: Can't run test\n");
812         }
813
814         return ret;
815 }
816
817 static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
818                 const void __user *from, size_t count)
819 {
820         char tmp[20];
821         ssize_t len;
822
823         len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
824         if (len >= 0) {
825                 tmp[len] = '\0';
826                 strlcpy(to, strim(tmp), available);
827         }
828
829         return len;
830 }
831
832 static ssize_t dtf_read_channel(struct file *file, char __user *buf,
833                 size_t count, loff_t *ppos)
834 {
835         struct dmatest_info *info = file->private_data;
836         return simple_read_from_buffer(buf, count, ppos,
837                         info->dbgfs_params.channel,
838                         strlen(info->dbgfs_params.channel));
839 }
840
841 static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
842                 size_t size, loff_t *ppos)
843 {
844         struct dmatest_info *info = file->private_data;
845         return dtf_write_string(info->dbgfs_params.channel,
846                                 sizeof(info->dbgfs_params.channel),
847                                 ppos, buf, size);
848 }
849
/* debugfs "channel" file: read/write the staged channel filter string. */
static const struct file_operations dtf_channel_fops = {
	.read	= dtf_read_channel,
	.write	= dtf_write_channel,
	.open	= simple_open,
	.llseek = default_llseek,
};
856
857 static ssize_t dtf_read_device(struct file *file, char __user *buf,
858                 size_t count, loff_t *ppos)
859 {
860         struct dmatest_info *info = file->private_data;
861         return simple_read_from_buffer(buf, count, ppos,
862                         info->dbgfs_params.device,
863                         strlen(info->dbgfs_params.device));
864 }
865
866 static ssize_t dtf_write_device(struct file *file, const char __user *buf,
867                 size_t size, loff_t *ppos)
868 {
869         struct dmatest_info *info = file->private_data;
870         return dtf_write_string(info->dbgfs_params.device,
871                                 sizeof(info->dbgfs_params.device),
872                                 ppos, buf, size);
873 }
874
/* debugfs "device" file: read/write the staged DMA device filter string. */
static const struct file_operations dtf_device_fops = {
	.read	= dtf_read_device,
	.write	= dtf_write_device,
	.open	= simple_open,
	.llseek = default_llseek,
};
881
882 static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
883                 size_t count, loff_t *ppos)
884 {
885         struct dmatest_info *info = file->private_data;
886         char buf[3];
887
888         mutex_lock(&info->lock);
889         if (info->nr_channels)
890                 buf[0] = 'Y';
891         else
892                 buf[0] = 'N';
893         mutex_unlock(&info->lock);
894         buf[1] = '\n';
895         buf[2] = 0x00;
896         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
897 }
898
899 static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
900                 size_t count, loff_t *ppos)
901 {
902         struct dmatest_info *info = file->private_data;
903         char buf[16];
904         bool bv;
905         int ret = 0;
906
907         if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
908                 return -EFAULT;
909
910         if (strtobool(buf, &bv) == 0) {
911                 mutex_lock(&info->lock);
912                 ret = __restart_threaded_test(info, bv);
913                 mutex_unlock(&info->lock);
914         }
915
916         return ret ? ret : count;
917 }
918
/* debugfs "run" file: query or toggle the threaded test. */
static const struct file_operations dtf_run_fops = {
	.read	= dtf_read_run,
	.write	= dtf_write_run,
	.open	= simple_open,
	.llseek = default_llseek,
};
925
/*
 * Create the dmatest debugfs hierarchy:
 *
 *   /sys/kernel/debug/dmatest/
 *     test_buf_size, threads_per_chan, max_channels, iterations,
 *     xor_sources, pq_sources, timeout   - raw u32 knobs (staged params)
 *     channel, device                    - string filters (staged params)
 *     run                                - start/stop the test
 *
 * All numeric nodes point directly into info->dbgfs_params; they only take
 * effect when "run" copies the staged set into info->params.
 *
 * Returns 0 on success, negative errno on failure (the whole directory is
 * removed again on any partial failure).
 */
static int dmatest_register_dbgfs(struct dmatest_info *info)
{
	struct dentry *d;
	struct dmatest_params *params = &info->dbgfs_params;
	int ret = -ENOMEM;

	d = debugfs_create_dir("dmatest", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);
	if (!d)
		goto err_root;

	info->root = d;

	/* Copy initial values so debugfs shows the module parameters */
	memcpy(params, &info->params, sizeof(*params));

	/* Test parameters */

	/* NOTE(review): the u32 nodes cast struct fields to u32* — assumes
	 * every numeric param field is exactly 32 bits; confirm in the
	 * dmatest_params definition. */
	d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->buf_size);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
				info, &dtf_channel_fops);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
				info, &dtf_device_fops);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->threads_per_chan);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->max_channels);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->iterations);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->xor_sources);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->pq_sources);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->timeout);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	/* Run or stop threaded test */
	d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
				info, &dtf_run_fops);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	return 0;

err_node:
	debugfs_remove_recursive(info->root);
err_root:
	pr_err("dmatest: Failed to initialize debugfs\n");
	return ret;
}
1004
1005 static int __init dmatest_init(void)
1006 {
1007         struct dmatest_info *info = &test_info;
1008         struct dmatest_params *params = &info->params;
1009         int ret;
1010
1011         memset(info, 0, sizeof(*info));
1012
1013         mutex_init(&info->lock);
1014         INIT_LIST_HEAD(&info->channels);
1015
1016         /* Set default parameters */
1017         params->buf_size = test_buf_size;
1018         strlcpy(params->channel, test_channel, sizeof(params->channel));
1019         strlcpy(params->device, test_device, sizeof(params->device));
1020         params->threads_per_chan = threads_per_chan;
1021         params->max_channels = max_channels;
1022         params->iterations = iterations;
1023         params->xor_sources = xor_sources;
1024         params->pq_sources = pq_sources;
1025         params->timeout = timeout;
1026
1027         ret = dmatest_register_dbgfs(info);
1028         if (ret)
1029                 return ret;
1030
1031 #ifdef MODULE
1032         return 0;
1033 #else
1034         return run_threaded_test(info);
1035 #endif
1036 }
1037 /* when compiled-in wait for drivers to load first */
1038 late_initcall(dmatest_init);
1039
1040 static void __exit dmatest_exit(void)
1041 {
1042         struct dmatest_info *info = &test_info;
1043
1044         debugfs_remove_recursive(info->root);
1045         stop_threaded_test(info);
1046 }
1047 module_exit(dmatest_exit);
1048
1049 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1050 MODULE_LICENSE("GPL v2");