git.karo-electronics.de Git - karo-tx-linux.git / blob - drivers/dma/dmatest.c
dmatest: define MAX_ERROR_COUNT constant
[karo-tx-linux.git] / drivers / dma / dmatest.c
1 /*
2  * DMA Engine test module
3  *
4  * Copyright (C) 2007 Atmel Corporation
5  * Copyright (C) 2013 Intel Corporation
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmaengine.h>
14 #include <linux/freezer.h>
15 #include <linux/init.h>
16 #include <linux/kthread.h>
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
19 #include <linux/random.h>
20 #include <linux/slab.h>
21 #include <linux/wait.h>
22 #include <linux/ctype.h>
23 #include <linux/debugfs.h>
24 #include <linux/uaccess.h>
25 #include <linux/seq_file.h>
26
27 static unsigned int test_buf_size = 16384;
28 module_param(test_buf_size, uint, S_IRUGO);
29 MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
30
31 static char test_channel[20];
32 module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
33 MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
34
35 static char test_device[20];
36 module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
37 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
38
39 static unsigned int threads_per_chan = 1;
40 module_param(threads_per_chan, uint, S_IRUGO);
41 MODULE_PARM_DESC(threads_per_chan,
42                 "Number of threads to start per channel (default: 1)");
43
44 static unsigned int max_channels;
45 module_param(max_channels, uint, S_IRUGO);
46 MODULE_PARM_DESC(max_channels,
47                 "Maximum number of channels to use (default: all)");
48
49 static unsigned int iterations;
50 module_param(iterations, uint, S_IRUGO);
51 MODULE_PARM_DESC(iterations,
52                 "Iterations before stopping test (default: infinite)");
53
54 static unsigned int xor_sources = 3;
55 module_param(xor_sources, uint, S_IRUGO);
56 MODULE_PARM_DESC(xor_sources,
57                 "Number of xor source buffers (default: 3)");
58
59 static unsigned int pq_sources = 3;
60 module_param(pq_sources, uint, S_IRUGO);
61 MODULE_PARM_DESC(pq_sources,
62                 "Number of p+q source buffers (default: 3)");
63
64 static int timeout = 3000;
65 module_param(timeout, uint, S_IRUGO);
66 MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
67                  "Pass -1 for infinite timeout");
68
69 /* Maximum amount of mismatched bytes in buffer to print */
70 #define MAX_ERROR_COUNT         32
71
/*
 * Initialization patterns. All bytes in the source buffer has bit 7
 * set, all bytes in the destination buffer has bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80	/* bit 7 set: byte belongs to a source buffer */
#define PATTERN_DST		0x00	/* bit 7 clear: byte belongs to a destination buffer */
#define PATTERN_COPY		0x40	/* bit 6: byte is inside the region to be copied */
#define PATTERN_OVERWRITE	0x20	/* bit 5: byte is inside the region to be overwritten */
#define PATTERN_COUNT_MASK	0x1f	/* low 5 bits: inverted per-byte counter */
88
struct dmatest_info;

/* Per-kthread test state; one instance per thread started on a channel. */
struct dmatest_thread {
	struct list_head	node;	/* link in dmatest_chan->threads */
	struct dmatest_info	*info;	/* owning test description */
	struct task_struct	*task;	/* kthread running dmatest_func() */
	struct dma_chan		*chan;	/* channel under test */
	u8			**srcs;	/* NULL-terminated array of source buffers */
	u8			**dsts;	/* NULL-terminated array of destination buffers */
	enum dma_transaction_type type;	/* DMA_MEMCPY, DMA_XOR or DMA_PQ */
	bool			done;	/* set by the thread just before it parks/exits */
};

/* A channel under test plus the list of threads exercising it. */
struct dmatest_chan {
	struct list_head	node;	/* link in dmatest_info->channels */
	struct dma_chan		*chan;
	struct list_head	threads;	/* list of struct dmatest_thread */
};

/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[20];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
};
131
/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @lock:		access protection to the fields of this structure
 */
struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;	/* dmatest_chan instances under test */
	unsigned int		nr_channels;	/* number of entries on @channels */
	struct mutex		lock;

	/* debugfs related stuff */
	struct dentry		*root;		/* debugfs root directory */
	struct dmatest_params	dbgfs_params;	/* params staged via debugfs, copied
						 * into @params on restart */
};

/* Single module-wide instance; one test setup runs at a time. */
static struct dmatest_info test_info;
152
153 static bool dmatest_match_channel(struct dmatest_params *params,
154                 struct dma_chan *chan)
155 {
156         if (params->channel[0] == '\0')
157                 return true;
158         return strcmp(dma_chan_name(chan), params->channel) == 0;
159 }
160
161 static bool dmatest_match_device(struct dmatest_params *params,
162                 struct dma_device *device)
163 {
164         if (params->device[0] == '\0')
165                 return true;
166         return strcmp(dev_name(device->dev), params->device) == 0;
167 }
168
/* Return one machine word of random bits for lengths/offsets. */
static unsigned long dmatest_random(void)
{
	unsigned long val;

	get_random_bytes(&val, sizeof(val));
	return val;
}
176
177 static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
178                 unsigned int buf_size)
179 {
180         unsigned int i;
181         u8 *buf;
182
183         for (; (buf = *bufs); bufs++) {
184                 for (i = 0; i < start; i++)
185                         buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
186                 for ( ; i < start + len; i++)
187                         buf[i] = PATTERN_SRC | PATTERN_COPY
188                                 | (~i & PATTERN_COUNT_MASK);
189                 for ( ; i < buf_size; i++)
190                         buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
191                 buf++;
192         }
193 }
194
195 static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
196                 unsigned int buf_size)
197 {
198         unsigned int i;
199         u8 *buf;
200
201         for (; (buf = *bufs); bufs++) {
202                 for (i = 0; i < start; i++)
203                         buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
204                 for ( ; i < start + len; i++)
205                         buf[i] = PATTERN_DST | PATTERN_OVERWRITE
206                                 | (~i & PATTERN_COUNT_MASK);
207                 for ( ; i < buf_size; i++)
208                         buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
209         }
210 }
211
/*
 * Report a single corrupted byte: which buffer it was found in, the
 * expected and actual values, and a diagnosis based on which pattern
 * bits differ (not copied / wrongly copied / plain mismatch).
 */
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8		diff = actual ^ pattern;
	u8		expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char	*thread_name = current->comm;

	if (is_srcbuf)
		pr_warning("%s: srcbuf[0x%x] overwritten!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		/* byte in the copy region still carries dst-only bits */
		pr_warning("%s: dstbuf[0x%x] not copied!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		/* byte outside the copy region carries the src bit */
		pr_warning("%s: dstbuf[0x%x] was copied!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else
		pr_warning("%s: dstbuf[0x%x] mismatch!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
}
237
238 static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
239                 unsigned int end, unsigned int counter, u8 pattern,
240                 bool is_srcbuf)
241 {
242         unsigned int i;
243         unsigned int error_count = 0;
244         u8 actual;
245         u8 expected;
246         u8 *buf;
247         unsigned int counter_orig = counter;
248
249         for (; (buf = *bufs); bufs++) {
250                 counter = counter_orig;
251                 for (i = start; i < end; i++) {
252                         actual = buf[i];
253                         expected = pattern | (~counter & PATTERN_COUNT_MASK);
254                         if (actual != expected) {
255                                 if (error_count < MAX_ERROR_COUNT)
256                                         dmatest_mismatch(actual, pattern, i,
257                                                          counter, is_srcbuf);
258                                 error_count++;
259                         }
260                         counter++;
261                 }
262         }
263
264         if (error_count > MAX_ERROR_COUNT)
265                 pr_warning("%s: %u errors suppressed\n",
266                         current->comm, error_count - MAX_ERROR_COUNT);
267
268         return error_count;
269 }
270
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;	/* transfer completion flag */
	wait_queue_head_t	*wait;	/* waitqueue the test thread sleeps on */
};

/*
 * DMA completion callback: mark the transfer finished and wake the
 * test thread waiting in dmatest_func().
 */
static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}
284
285 static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
286                              unsigned int count)
287 {
288         while (count--)
289                 dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
290 }
291
292 static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
293                              unsigned int count)
294 {
295         while (count--)
296                 dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
297 }
298
/* Smaller of @x and @y, rounded down to the nearest odd number. */
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int smaller = (x < y) ? x : y;

	return (smaller & 1) ? smaller : smaller - 1;
}
305
306 /*
307  * This function repeatedly tests DMA transfers of various lengths and
308  * offsets for a given operation type until it is told to exit by
309  * kthread_stop(). There may be multiple threads running this function
310  * in parallel for a single channel, and there may be multiple channels
311  * being tested in parallel.
312  *
313  * Before each test, the source and destination buffer is initialized
314  * with a known pattern. This pattern is different depending on
315  * whether it's in an area which is supposed to be copied or
316  * overwritten, and different in the source and destination buffers.
317  * So if the DMA engine doesn't copy exactly what we tell it to copy,
318  * we'll notice.
319  */
320 static int dmatest_func(void *data)
321 {
322         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
323         struct dmatest_thread   *thread = data;
324         struct dmatest_done     done = { .wait = &done_wait };
325         struct dmatest_info     *info;
326         struct dmatest_params   *params;
327         struct dma_chan         *chan;
328         struct dma_device       *dev;
329         const char              *thread_name;
330         unsigned int            src_off, dst_off, len;
331         unsigned int            error_count;
332         unsigned int            failed_tests = 0;
333         unsigned int            total_tests = 0;
334         dma_cookie_t            cookie;
335         enum dma_status         status;
336         enum dma_ctrl_flags     flags;
337         u8                      *pq_coefs = NULL;
338         int                     ret;
339         int                     src_cnt;
340         int                     dst_cnt;
341         int                     i;
342
343         thread_name = current->comm;
344         set_freezable();
345
346         ret = -ENOMEM;
347
348         smp_rmb();
349         info = thread->info;
350         params = &info->params;
351         chan = thread->chan;
352         dev = chan->device;
353         if (thread->type == DMA_MEMCPY)
354                 src_cnt = dst_cnt = 1;
355         else if (thread->type == DMA_XOR) {
356                 /* force odd to ensure dst = src */
357                 src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
358                 dst_cnt = 1;
359         } else if (thread->type == DMA_PQ) {
360                 /* force odd to ensure dst = src */
361                 src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
362                 dst_cnt = 2;
363
364                 pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
365                 if (!pq_coefs)
366                         goto err_thread_type;
367
368                 for (i = 0; i < src_cnt; i++)
369                         pq_coefs[i] = 1;
370         } else
371                 goto err_thread_type;
372
373         thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
374         if (!thread->srcs)
375                 goto err_srcs;
376         for (i = 0; i < src_cnt; i++) {
377                 thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
378                 if (!thread->srcs[i])
379                         goto err_srcbuf;
380         }
381         thread->srcs[i] = NULL;
382
383         thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
384         if (!thread->dsts)
385                 goto err_dsts;
386         for (i = 0; i < dst_cnt; i++) {
387                 thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
388                 if (!thread->dsts[i])
389                         goto err_dstbuf;
390         }
391         thread->dsts[i] = NULL;
392
393         set_user_nice(current, 10);
394
395         /*
396          * src buffers are freed by the DMAEngine code with dma_unmap_single()
397          * dst buffers are freed by ourselves below
398          */
399         flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
400               | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
401
402         while (!kthread_should_stop()
403                && !(params->iterations && total_tests >= params->iterations)) {
404                 struct dma_async_tx_descriptor *tx = NULL;
405                 dma_addr_t dma_srcs[src_cnt];
406                 dma_addr_t dma_dsts[dst_cnt];
407                 u8 align = 0;
408
409                 total_tests++;
410
411                 /* honor alignment restrictions */
412                 if (thread->type == DMA_MEMCPY)
413                         align = dev->copy_align;
414                 else if (thread->type == DMA_XOR)
415                         align = dev->xor_align;
416                 else if (thread->type == DMA_PQ)
417                         align = dev->pq_align;
418
419                 if (1 << align > params->buf_size) {
420                         pr_err("%u-byte buffer too small for %d-byte alignment\n",
421                                params->buf_size, 1 << align);
422                         break;
423                 }
424
425                 len = dmatest_random() % params->buf_size + 1;
426                 len = (len >> align) << align;
427                 if (!len)
428                         len = 1 << align;
429                 src_off = dmatest_random() % (params->buf_size - len + 1);
430                 dst_off = dmatest_random() % (params->buf_size - len + 1);
431
432                 src_off = (src_off >> align) << align;
433                 dst_off = (dst_off >> align) << align;
434
435                 dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
436                 dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
437
438                 for (i = 0; i < src_cnt; i++) {
439                         u8 *buf = thread->srcs[i] + src_off;
440
441                         dma_srcs[i] = dma_map_single(dev->dev, buf, len,
442                                                      DMA_TO_DEVICE);
443                         ret = dma_mapping_error(dev->dev, dma_srcs[i]);
444                         if (ret) {
445                                 unmap_src(dev->dev, dma_srcs, len, i);
446                                 pr_warn("%s: #%u: mapping error %d with "
447                                         "src_off=0x%x len=0x%x\n",
448                                         thread_name, total_tests - 1, ret,
449                                         src_off, len);
450                                 failed_tests++;
451                                 continue;
452                         }
453                 }
454                 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
455                 for (i = 0; i < dst_cnt; i++) {
456                         dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
457                                                      params->buf_size,
458                                                      DMA_BIDIRECTIONAL);
459                         ret = dma_mapping_error(dev->dev, dma_dsts[i]);
460                         if (ret) {
461                                 unmap_src(dev->dev, dma_srcs, len, src_cnt);
462                                 unmap_dst(dev->dev, dma_dsts, params->buf_size,
463                                           i);
464                                 pr_warn("%s: #%u: mapping error %d with "
465                                         "dst_off=0x%x len=0x%x\n",
466                                         thread_name, total_tests - 1, ret,
467                                         dst_off, params->buf_size);
468                                 failed_tests++;
469                                 continue;
470                         }
471                 }
472
473                 if (thread->type == DMA_MEMCPY)
474                         tx = dev->device_prep_dma_memcpy(chan,
475                                                          dma_dsts[0] + dst_off,
476                                                          dma_srcs[0], len,
477                                                          flags);
478                 else if (thread->type == DMA_XOR)
479                         tx = dev->device_prep_dma_xor(chan,
480                                                       dma_dsts[0] + dst_off,
481                                                       dma_srcs, src_cnt,
482                                                       len, flags);
483                 else if (thread->type == DMA_PQ) {
484                         dma_addr_t dma_pq[dst_cnt];
485
486                         for (i = 0; i < dst_cnt; i++)
487                                 dma_pq[i] = dma_dsts[i] + dst_off;
488                         tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
489                                                      src_cnt, pq_coefs,
490                                                      len, flags);
491                 }
492
493                 if (!tx) {
494                         unmap_src(dev->dev, dma_srcs, len, src_cnt);
495                         unmap_dst(dev->dev, dma_dsts, params->buf_size,
496                                   dst_cnt);
497                         pr_warning("%s: #%u: prep error with src_off=0x%x "
498                                         "dst_off=0x%x len=0x%x\n",
499                                         thread_name, total_tests - 1,
500                                         src_off, dst_off, len);
501                         msleep(100);
502                         failed_tests++;
503                         continue;
504                 }
505
506                 done.done = false;
507                 tx->callback = dmatest_callback;
508                 tx->callback_param = &done;
509                 cookie = tx->tx_submit(tx);
510
511                 if (dma_submit_error(cookie)) {
512                         pr_warning("%s: #%u: submit error %d with src_off=0x%x "
513                                         "dst_off=0x%x len=0x%x\n",
514                                         thread_name, total_tests - 1, cookie,
515                                         src_off, dst_off, len);
516                         msleep(100);
517                         failed_tests++;
518                         continue;
519                 }
520                 dma_async_issue_pending(chan);
521
522                 wait_event_freezable_timeout(done_wait,
523                                              done.done || kthread_should_stop(),
524                                              msecs_to_jiffies(params->timeout));
525
526                 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
527
528                 if (!done.done) {
529                         /*
530                          * We're leaving the timed out dma operation with
531                          * dangling pointer to done_wait.  To make this
532                          * correct, we'll need to allocate wait_done for
533                          * each test iteration and perform "who's gonna
534                          * free it this time?" dancing.  For now, just
535                          * leave it dangling.
536                          */
537                         pr_warning("%s: #%u: test timed out\n",
538                                    thread_name, total_tests - 1);
539                         failed_tests++;
540                         continue;
541                 } else if (status != DMA_SUCCESS) {
542                         pr_warning("%s: #%u: got completion callback,"
543                                    " but status is \'%s\'\n",
544                                    thread_name, total_tests - 1,
545                                    status == DMA_ERROR ? "error" : "in progress");
546                         failed_tests++;
547                         continue;
548                 }
549
550                 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
551                 unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
552
553                 error_count = 0;
554
555                 pr_debug("%s: verifying source buffer...\n", thread_name);
556                 error_count += dmatest_verify(thread->srcs, 0, src_off,
557                                 0, PATTERN_SRC, true);
558                 error_count += dmatest_verify(thread->srcs, src_off,
559                                 src_off + len, src_off,
560                                 PATTERN_SRC | PATTERN_COPY, true);
561                 error_count += dmatest_verify(thread->srcs, src_off + len,
562                                 params->buf_size, src_off + len,
563                                 PATTERN_SRC, true);
564
565                 pr_debug("%s: verifying dest buffer...\n",
566                                 thread->task->comm);
567                 error_count += dmatest_verify(thread->dsts, 0, dst_off,
568                                 0, PATTERN_DST, false);
569                 error_count += dmatest_verify(thread->dsts, dst_off,
570                                 dst_off + len, src_off,
571                                 PATTERN_SRC | PATTERN_COPY, false);
572                 error_count += dmatest_verify(thread->dsts, dst_off + len,
573                                 params->buf_size, dst_off + len,
574                                 PATTERN_DST, false);
575
576                 if (error_count) {
577                         pr_warning("%s: #%u: %u errors with "
578                                 "src_off=0x%x dst_off=0x%x len=0x%x\n",
579                                 thread_name, total_tests - 1, error_count,
580                                 src_off, dst_off, len);
581                         failed_tests++;
582                 } else {
583                         pr_debug("%s: #%u: No errors with "
584                                 "src_off=0x%x dst_off=0x%x len=0x%x\n",
585                                 thread_name, total_tests - 1,
586                                 src_off, dst_off, len);
587                 }
588         }
589
590         ret = 0;
591         for (i = 0; thread->dsts[i]; i++)
592                 kfree(thread->dsts[i]);
593 err_dstbuf:
594         kfree(thread->dsts);
595 err_dsts:
596         for (i = 0; thread->srcs[i]; i++)
597                 kfree(thread->srcs[i]);
598 err_srcbuf:
599         kfree(thread->srcs);
600 err_srcs:
601         kfree(pq_coefs);
602 err_thread_type:
603         pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
604                         thread_name, total_tests, failed_tests, ret);
605
606         /* terminate all transfers on specified channels */
607         if (ret)
608                 dmaengine_terminate_all(chan);
609
610         thread->done = true;
611
612         if (params->iterations > 0)
613                 while (!kthread_should_stop()) {
614                         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
615                         interruptible_sleep_on(&wait_dmatest_exit);
616                 }
617
618         return ret;
619 }
620
621 static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
622 {
623         struct dmatest_thread   *thread;
624         struct dmatest_thread   *_thread;
625         int                     ret;
626
627         list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
628                 ret = kthread_stop(thread->task);
629                 pr_debug("dmatest: thread %s exited with status %d\n",
630                                 thread->task->comm, ret);
631                 list_del(&thread->node);
632                 kfree(thread);
633         }
634
635         /* terminate all transfers on specified channels */
636         dmaengine_terminate_all(dtc->chan);
637
638         kfree(dtc);
639 }
640
/*
 * Start params->threads_per_chan kthreads running dmatest_func() on
 * @dtc's channel for operation @type.  Returns the number of threads
 * actually started (may be fewer than requested if allocation or
 * kthread creation fails), or -EINVAL for an unsupported @type.
 */
static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;	/* operation name used in the thread name */
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warning("dmatest: No memory for %s-%s%u\n",
				   dma_chan_name(chan), op, i);

			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		/* pairs with smp_rmb() in dmatest_func(): publish the
		 * fields above before the new kthread starts reading them */
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warning("dmatest: Failed to run thread %s-%s%u\n",
					dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}
687
688 static int dmatest_add_channel(struct dmatest_info *info,
689                 struct dma_chan *chan)
690 {
691         struct dmatest_chan     *dtc;
692         struct dma_device       *dma_dev = chan->device;
693         unsigned int            thread_count = 0;
694         int cnt;
695
696         dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
697         if (!dtc) {
698                 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
699                 return -ENOMEM;
700         }
701
702         dtc->chan = chan;
703         INIT_LIST_HEAD(&dtc->threads);
704
705         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
706                 cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
707                 thread_count += cnt > 0 ? cnt : 0;
708         }
709         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
710                 cnt = dmatest_add_threads(info, dtc, DMA_XOR);
711                 thread_count += cnt > 0 ? cnt : 0;
712         }
713         if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
714                 cnt = dmatest_add_threads(info, dtc, DMA_PQ);
715                 thread_count += cnt > 0 ? cnt : 0;
716         }
717
718         pr_info("dmatest: Started %u threads using %s\n",
719                 thread_count, dma_chan_name(chan));
720
721         list_add_tail(&dtc->node, &info->channels);
722         info->nr_channels++;
723
724         return 0;
725 }
726
727 static bool filter(struct dma_chan *chan, void *param)
728 {
729         struct dmatest_params *params = param;
730
731         if (!dmatest_match_channel(params, chan) ||
732             !dmatest_match_device(params, chan->device))
733                 return false;
734         else
735                 return true;
736 }
737
738 static int __run_threaded_test(struct dmatest_info *info)
739 {
740         dma_cap_mask_t mask;
741         struct dma_chan *chan;
742         struct dmatest_params *params = &info->params;
743         int err = 0;
744
745         dma_cap_zero(mask);
746         dma_cap_set(DMA_MEMCPY, mask);
747         for (;;) {
748                 chan = dma_request_channel(mask, filter, params);
749                 if (chan) {
750                         err = dmatest_add_channel(info, chan);
751                         if (err) {
752                                 dma_release_channel(chan);
753                                 break; /* add_channel failed, punt */
754                         }
755                 } else
756                         break; /* no more channels available */
757                 if (params->max_channels &&
758                     info->nr_channels >= params->max_channels)
759                         break; /* we have all we need */
760         }
761         return err;
762 }
763
#ifndef MODULE
/* Locked wrapper around __run_threaded_test(); compiled only when
 * dmatest is built in (see the #ifndef MODULE guard). */
static int run_threaded_test(struct dmatest_info *info)
{
	int ret;

	mutex_lock(&info->lock);
	ret = __run_threaded_test(info);
	mutex_unlock(&info->lock);
	return ret;
}
#endif
775
776 static void __stop_threaded_test(struct dmatest_info *info)
777 {
778         struct dmatest_chan *dtc, *_dtc;
779         struct dma_chan *chan;
780
781         list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
782                 list_del(&dtc->node);
783                 chan = dtc->chan;
784                 dmatest_cleanup_channel(dtc);
785                 pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
786                 dma_release_channel(chan);
787         }
788
789         info->nr_channels = 0;
790 }
791
/* Locked wrapper around __stop_threaded_test(). */
static void stop_threaded_test(struct dmatest_info *info)
{
	mutex_lock(&info->lock);
	__stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
798
799 static int __restart_threaded_test(struct dmatest_info *info, bool run)
800 {
801         struct dmatest_params *params = &info->params;
802         int ret;
803
804         /* Stop any running test first */
805         __stop_threaded_test(info);
806
807         if (run == false)
808                 return 0;
809
810         /* Copy test parameters */
811         memcpy(params, &info->dbgfs_params, sizeof(*params));
812
813         /* Run test with new parameters */
814         ret = __run_threaded_test(info);
815         if (ret) {
816                 __stop_threaded_test(info);
817                 pr_err("dmatest: Can't run test\n");
818         }
819
820         return ret;
821 }
822
823 static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
824                 const void __user *from, size_t count)
825 {
826         char tmp[20];
827         ssize_t len;
828
829         len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
830         if (len >= 0) {
831                 tmp[len] = '\0';
832                 strlcpy(to, strim(tmp), available);
833         }
834
835         return len;
836 }
837
838 static ssize_t dtf_read_channel(struct file *file, char __user *buf,
839                 size_t count, loff_t *ppos)
840 {
841         struct dmatest_info *info = file->private_data;
842         return simple_read_from_buffer(buf, count, ppos,
843                         info->dbgfs_params.channel,
844                         strlen(info->dbgfs_params.channel));
845 }
846
847 static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
848                 size_t size, loff_t *ppos)
849 {
850         struct dmatest_info *info = file->private_data;
851         return dtf_write_string(info->dbgfs_params.channel,
852                                 sizeof(info->dbgfs_params.channel),
853                                 ppos, buf, size);
854 }
855
/* File operations for the debugfs "channel" node. */
static const struct file_operations dtf_channel_fops = {
	.read   = dtf_read_channel,
	.write  = dtf_write_channel,
	.open   = simple_open,
	.llseek = default_llseek,
};
862
863 static ssize_t dtf_read_device(struct file *file, char __user *buf,
864                 size_t count, loff_t *ppos)
865 {
866         struct dmatest_info *info = file->private_data;
867         return simple_read_from_buffer(buf, count, ppos,
868                         info->dbgfs_params.device,
869                         strlen(info->dbgfs_params.device));
870 }
871
872 static ssize_t dtf_write_device(struct file *file, const char __user *buf,
873                 size_t size, loff_t *ppos)
874 {
875         struct dmatest_info *info = file->private_data;
876         return dtf_write_string(info->dbgfs_params.device,
877                                 sizeof(info->dbgfs_params.device),
878                                 ppos, buf, size);
879 }
880
/* File operations for the debugfs "device" node. */
static const struct file_operations dtf_device_fops = {
	.read   = dtf_read_device,
	.write  = dtf_write_device,
	.open   = simple_open,
	.llseek = default_llseek,
};
887
888 static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
889                 size_t count, loff_t *ppos)
890 {
891         struct dmatest_info *info = file->private_data;
892         char buf[3];
893         struct dmatest_chan *dtc;
894         bool alive = false;
895
896         mutex_lock(&info->lock);
897         list_for_each_entry(dtc, &info->channels, node) {
898                 struct dmatest_thread *thread;
899
900                 list_for_each_entry(thread, &dtc->threads, node) {
901                         if (!thread->done) {
902                                 alive = true;
903                                 break;
904                         }
905                 }
906         }
907
908         if (alive) {
909                 buf[0] = 'Y';
910         } else {
911                 __stop_threaded_test(info);
912                 buf[0] = 'N';
913         }
914
915         mutex_unlock(&info->lock);
916         buf[1] = '\n';
917         buf[2] = 0x00;
918         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
919 }
920
921 static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
922                 size_t count, loff_t *ppos)
923 {
924         struct dmatest_info *info = file->private_data;
925         char buf[16];
926         bool bv;
927         int ret = 0;
928
929         if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
930                 return -EFAULT;
931
932         if (strtobool(buf, &bv) == 0) {
933                 mutex_lock(&info->lock);
934                 ret = __restart_threaded_test(info, bv);
935                 mutex_unlock(&info->lock);
936         }
937
938         return ret ? ret : count;
939 }
940
/* File operations for the debugfs "run" node (start/stop the test). */
static const struct file_operations dtf_run_fops = {
	.read   = dtf_read_run,
	.write  = dtf_write_run,
	.open   = simple_open,
	.llseek = default_llseek,
};
947
948 static int dmatest_register_dbgfs(struct dmatest_info *info)
949 {
950         struct dentry *d;
951         struct dmatest_params *params = &info->dbgfs_params;
952         int ret = -ENOMEM;
953
954         d = debugfs_create_dir("dmatest", NULL);
955         if (IS_ERR(d))
956                 return PTR_ERR(d);
957         if (!d)
958                 goto err_root;
959
960         info->root = d;
961
962         /* Copy initial values */
963         memcpy(params, &info->params, sizeof(*params));
964
965         /* Test parameters */
966
967         d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
968                                (u32 *)&params->buf_size);
969         if (IS_ERR_OR_NULL(d))
970                 goto err_node;
971
972         d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
973                                 info, &dtf_channel_fops);
974         if (IS_ERR_OR_NULL(d))
975                 goto err_node;
976
977         d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
978                                 info, &dtf_device_fops);
979         if (IS_ERR_OR_NULL(d))
980                 goto err_node;
981
982         d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
983                                (u32 *)&params->threads_per_chan);
984         if (IS_ERR_OR_NULL(d))
985                 goto err_node;
986
987         d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
988                                (u32 *)&params->max_channels);
989         if (IS_ERR_OR_NULL(d))
990                 goto err_node;
991
992         d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
993                                (u32 *)&params->iterations);
994         if (IS_ERR_OR_NULL(d))
995                 goto err_node;
996
997         d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
998                                (u32 *)&params->xor_sources);
999         if (IS_ERR_OR_NULL(d))
1000                 goto err_node;
1001
1002         d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
1003                                (u32 *)&params->pq_sources);
1004         if (IS_ERR_OR_NULL(d))
1005                 goto err_node;
1006
1007         d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
1008                                (u32 *)&params->timeout);
1009         if (IS_ERR_OR_NULL(d))
1010                 goto err_node;
1011
1012         /* Run or stop threaded test */
1013         d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
1014                                 info, &dtf_run_fops);
1015         if (IS_ERR_OR_NULL(d))
1016                 goto err_node;
1017
1018         return 0;
1019
1020 err_node:
1021         debugfs_remove_recursive(info->root);
1022 err_root:
1023         pr_err("dmatest: Failed to initialize debugfs\n");
1024         return ret;
1025 }
1026
/*
 * Module/built-in init: seed the default test parameters from the
 * module parameters, register the debugfs interface and - for the
 * built-in case only - start the test immediately.
 */
static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;
	int ret;

	/* Clear before initializing the lock and list heads below. */
	memset(info, 0, sizeof(*info));

	mutex_init(&info->lock);
	INIT_LIST_HEAD(&info->channels);

	/* Set default parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, test_channel, sizeof(params->channel));
	strlcpy(params->device, test_device, sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;

	ret = dmatest_register_dbgfs(info);
	if (ret)
		return ret;

#ifdef MODULE
	/* As a module, wait for the user to start the test via debugfs. */
	return 0;
#else
	return run_threaded_test(info);
#endif
}
1059 /* when compiled-in wait for drivers to load first */
1060 late_initcall(dmatest_init);
1061
/*
 * Module unload: remove the debugfs tree first (so no new run can be
 * triggered through it), then stop the threads and release the
 * channels.
 */
static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	debugfs_remove_recursive(info->root);
	stop_threaded_test(info);
}
1069 module_exit(dmatest_exit);
1070
1071 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1072 MODULE_LICENSE("GPL v2");