kernel/locking/test-ww_mutex.c
/*
 * Module-based API test facility for ww_mutexes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(ww_class);
struct workqueue_struct *wq;

struct test_mutex {
        struct work_struct work;
        struct ww_mutex mutex;
        struct completion ready, go, done;
        unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

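/*
 * Worker side of the basic mutual exclusion test: signal that we are ready,
 * wait for the go signal, then acquire the mutex (spinning on trylock if
 * TEST_MTX_TRY is set) and report completion before releasing it.
 */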
static void test_mutex_work(struct work_struct *work)
{
        struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

        complete(&mtx->ready);
        wait_for_completion(&mtx->go);

        if (mtx->flags & TEST_MTX_TRY) {
                while (!ww_mutex_trylock(&mtx->mutex))
                        cond_resched();
        } else {
                ww_mutex_lock(&mtx->mutex, NULL);
        }
        complete(&mtx->done);
        ww_mutex_unlock(&mtx->mutex);
}

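/*
 * Hold the mutex from this thread while the worker tries to take it, and
 * verify that the worker cannot signal mtx.done before we release the lock.
 * TEST_MTX_SPIN selects a polling wait for the timeout, TEST_MTX_CTX makes
 * us lock with a ww_acquire_ctx instead of NULL.
 */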
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
        struct test_mutex mtx;
        struct ww_acquire_ctx ctx;
        int ret;

        ww_mutex_init(&mtx.mutex, &ww_class);
        ww_acquire_init(&ctx, &ww_class);

        INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
        init_completion(&mtx.ready);
        init_completion(&mtx.go);
        init_completion(&mtx.done);
        mtx.flags = flags;

        schedule_work(&mtx.work);

        wait_for_completion(&mtx.ready);
        ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
        complete(&mtx.go);
        if (flags & TEST_MTX_SPIN) {
                unsigned long timeout = jiffies + TIMEOUT;

                ret = 0;
                do {
                        if (completion_done(&mtx.done)) {
                                ret = -EINVAL;
                                break;
                        }
                        cond_resched();
                } while (time_before(jiffies, timeout));
        } else {
                ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
        }
        ww_mutex_unlock(&mtx.mutex);
        ww_acquire_fini(&ctx);

        if (ret) {
                pr_err("%s(flags=%x): mutual exclusion failure\n",
                       __func__, flags);
                ret = -EINVAL;
        }

        flush_work(&mtx.work);
        destroy_work_on_stack(&mtx.work);
        return ret;
#undef TIMEOUT
}

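/* Run the basic mutual exclusion test for every combination of flags. */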
static int test_mutex(void)
{
        int ret;
        int i;

        for (i = 0; i < __TEST_MTX_LAST; i++) {
                ret = __test_mutex(i);
                if (ret)
                        return ret;
        }

        return 0;
}

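/*
 * AA test: a trylock on a ww_mutex we already hold must fail, and taking the
 * same ww_mutex twice within one acquire context must return -EALREADY.
 */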
static int test_aa(void)
{
        struct ww_mutex mutex;
        struct ww_acquire_ctx ctx;
        int ret;

        ww_mutex_init(&mutex, &ww_class);
        ww_acquire_init(&ctx, &ww_class);

        ww_mutex_lock(&mutex, &ctx);

        if (ww_mutex_trylock(&mutex)) {
                pr_err("%s: trylocked itself!\n", __func__);
                ww_mutex_unlock(&mutex);
                ret = -EINVAL;
                goto out;
        }

        ret = ww_mutex_lock(&mutex, &ctx);
        if (ret != -EALREADY) {
                pr_err("%s: missed deadlock for recursing, ret=%d\n",
                       __func__, ret);
                if (!ret)
                        ww_mutex_unlock(&mutex);
                ret = -EINVAL;
                goto out;
        }

        ret = 0;
out:
        ww_mutex_unlock(&mutex);
        ww_acquire_fini(&ctx);
        return ret;
}

struct test_abba {
        struct work_struct work;
        struct ww_mutex a_mutex;
        struct ww_mutex b_mutex;
        struct completion a_ready;
        struct completion b_ready;
        bool resolve;
        int result;
};

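/*
 * Worker side of the ABBA test: lock B, wait until the other thread holds A,
 * then try to lock A.  If resolve is set, back off on -EDEADLK and reacquire
 * both locks via the slow path.
 */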
static void test_abba_work(struct work_struct *work)
{
        struct test_abba *abba = container_of(work, typeof(*abba), work);
        struct ww_acquire_ctx ctx;
        int err;

        ww_acquire_init(&ctx, &ww_class);
        ww_mutex_lock(&abba->b_mutex, &ctx);

        complete(&abba->b_ready);
        wait_for_completion(&abba->a_ready);

        err = ww_mutex_lock(&abba->a_mutex, &ctx);
        if (abba->resolve && err == -EDEADLK) {
                ww_mutex_unlock(&abba->b_mutex);
                ww_mutex_lock_slow(&abba->a_mutex, &ctx);
                err = ww_mutex_lock(&abba->b_mutex, &ctx);
        }

        if (!err)
                ww_mutex_unlock(&abba->a_mutex);
        ww_mutex_unlock(&abba->b_mutex);
        ww_acquire_fini(&ctx);

        abba->result = err;
}

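/*
 * ABBA test: two threads acquire the same pair of ww_mutexes in opposite
 * order.  With resolve=true both sides must back off and complete without
 * error; with resolve=false at least one side must observe -EDEADLK.
 */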
static int test_abba(bool resolve)
{
        struct test_abba abba;
        struct ww_acquire_ctx ctx;
        int err, ret;

        ww_mutex_init(&abba.a_mutex, &ww_class);
        ww_mutex_init(&abba.b_mutex, &ww_class);
        INIT_WORK_ONSTACK(&abba.work, test_abba_work);
        init_completion(&abba.a_ready);
        init_completion(&abba.b_ready);
        abba.resolve = resolve;

        schedule_work(&abba.work);

        ww_acquire_init(&ctx, &ww_class);
        ww_mutex_lock(&abba.a_mutex, &ctx);

        complete(&abba.a_ready);
        wait_for_completion(&abba.b_ready);

        err = ww_mutex_lock(&abba.b_mutex, &ctx);
        if (resolve && err == -EDEADLK) {
                ww_mutex_unlock(&abba.a_mutex);
                ww_mutex_lock_slow(&abba.b_mutex, &ctx);
                err = ww_mutex_lock(&abba.a_mutex, &ctx);
        }

        if (!err)
                ww_mutex_unlock(&abba.b_mutex);
        ww_mutex_unlock(&abba.a_mutex);
        ww_acquire_fini(&ctx);

        flush_work(&abba.work);
        destroy_work_on_stack(&abba.work);

        ret = 0;
        if (resolve) {
                if (err || abba.result) {
                        pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
                               __func__, err, abba.result);
                        ret = -EINVAL;
                }
        } else {
                if (err != -EDEADLK && abba.result != -EDEADLK) {
                        pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
                               __func__, err, abba.result);
                        ret = -EINVAL;
                }
        }
        return ret;
}

struct test_cycle {
        struct work_struct work;
        struct ww_mutex a_mutex;
        struct ww_mutex *b_mutex;
        struct completion *a_signal;
        struct completion b_signal;
        int result;
};

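/*
 * One link in the deadlock cycle: lock our own mutex, then, once the next
 * thread in the ring holds its own mutex, try to take that one as well.
 * Whoever gets -EDEADLK drops its lock and retries via the slow path.
 */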
static void test_cycle_work(struct work_struct *work)
{
        struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
        struct ww_acquire_ctx ctx;
        int err;

        ww_acquire_init(&ctx, &ww_class);
        ww_mutex_lock(&cycle->a_mutex, &ctx);

        complete(cycle->a_signal);
        wait_for_completion(&cycle->b_signal);

        err = ww_mutex_lock(cycle->b_mutex, &ctx);
        if (err == -EDEADLK) {
                ww_mutex_unlock(&cycle->a_mutex);
                ww_mutex_lock_slow(cycle->b_mutex, &ctx);
                err = ww_mutex_lock(&cycle->a_mutex, &ctx);
        }

        if (!err)
                ww_mutex_unlock(cycle->b_mutex);
        ww_mutex_unlock(&cycle->a_mutex);
        ww_acquire_fini(&ctx);

        cycle->result = err;
}

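/*
 * Build a ring of nthreads workers, each holding its own mutex and trying to
 * take its neighbour's, and verify that the resulting deadlock cycle is
 * always resolved.
 */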
static int __test_cycle(unsigned int nthreads)
{
        struct test_cycle *cycles;
        unsigned int n, last = nthreads - 1;
        int ret;

        cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
        if (!cycles)
                return -ENOMEM;

        for (n = 0; n < nthreads; n++) {
                struct test_cycle *cycle = &cycles[n];

                ww_mutex_init(&cycle->a_mutex, &ww_class);
                if (n == last)
                        cycle->b_mutex = &cycles[0].a_mutex;
                else
                        cycle->b_mutex = &cycles[n + 1].a_mutex;

                if (n == 0)
                        cycle->a_signal = &cycles[last].b_signal;
                else
                        cycle->a_signal = &cycles[n - 1].b_signal;
                init_completion(&cycle->b_signal);

                INIT_WORK(&cycle->work, test_cycle_work);
                cycle->result = 0;
        }

        for (n = 0; n < nthreads; n++)
                queue_work(wq, &cycles[n].work);

        flush_workqueue(wq);

        ret = 0;
        for (n = 0; n < nthreads; n++) {
                struct test_cycle *cycle = &cycles[n];

                if (!cycle->result)
                        continue;

                pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
                       n, nthreads, cycle->result);
                ret = -EINVAL;
                break;
        }

        for (n = 0; n < nthreads; n++)
                ww_mutex_destroy(&cycles[n].a_mutex);
        kfree(cycles);
        return ret;
}

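/* Test deadlock-cycle resolution for ring sizes from 2 up to ncpus + 1. */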
static int test_cycle(unsigned int ncpus)
{
        unsigned int n;
        int ret;

        for (n = 2; n <= ncpus + 1; n++) {
                ret = __test_cycle(n);
                if (ret)
                        return ret;
        }

        return 0;
}

struct stress {
        struct work_struct work;
        struct ww_mutex *locks;
        unsigned long timeout;
        int nlocks;
};

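/* Return a randomly shuffled array of the indices 0..count-1. */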
static int *get_random_order(int count)
{
        int *order;
        int n, r, tmp;

        order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
        if (!order)
                return order;

        for (n = 0; n < count; n++)
                order[n] = n;

        for (n = count - 1; n > 1; n--) {
                r = get_random_int() % (n + 1);
                if (r != n) {
                        tmp = order[n];
                        order[n] = order[r];
                        order[r] = tmp;
                }
        }

        return order;
}

static void dummy_load(struct stress *stress)
{
        usleep_range(1000, 2000);
}

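/*
 * Stress worker: repeatedly acquire all locks in one fixed, randomly chosen
 * order under a single acquire context, backing off and retrying from the
 * contended lock whenever -EDEADLK is returned.
 */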
static void stress_inorder_work(struct work_struct *work)
{
        struct stress *stress = container_of(work, typeof(*stress), work);
        const int nlocks = stress->nlocks;
        struct ww_mutex *locks = stress->locks;
        struct ww_acquire_ctx ctx;
        int *order;

        order = get_random_order(nlocks);
        if (!order)
                return;

        do {
                int contended = -1;
                int n, err;

                ww_acquire_init(&ctx, &ww_class);
retry:
                err = 0;
                for (n = 0; n < nlocks; n++) {
                        if (n == contended)
                                continue;

                        err = ww_mutex_lock(&locks[order[n]], &ctx);
                        if (err < 0)
                                break;
                }
                if (!err)
                        dummy_load(stress);

                if (contended > n)
                        ww_mutex_unlock(&locks[order[contended]]);
                contended = n;
                while (n--)
                        ww_mutex_unlock(&locks[order[n]]);

                if (err == -EDEADLK) {
                        ww_mutex_lock_slow(&locks[order[contended]], &ctx);
                        goto retry;
                }

                if (err) {
                        pr_err_once("stress (%s) failed with %d\n",
                                    __func__, err);
                        break;
                }

                ww_acquire_fini(&ctx);
        } while (!time_after(jiffies, stress->timeout));

        kfree(order);
        kfree(stress);
}

struct reorder_lock {
        struct list_head link;
        struct ww_mutex *lock;
};

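/*
 * Stress worker: acquire the locks in list order, and on -EDEADLK move the
 * contended lock to the head of the list before retrying, so the acquisition
 * order keeps changing between passes.
 */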
static void stress_reorder_work(struct work_struct *work)
{
        struct stress *stress = container_of(work, typeof(*stress), work);
        LIST_HEAD(locks);
        struct ww_acquire_ctx ctx;
        struct reorder_lock *ll, *ln;
        int *order;
        int n, err;

        order = get_random_order(stress->nlocks);
        if (!order)
                return;

        for (n = 0; n < stress->nlocks; n++) {
                ll = kmalloc(sizeof(*ll), GFP_KERNEL);
                if (!ll)
                        goto out;

                ll->lock = &stress->locks[order[n]];
                list_add(&ll->link, &locks);
        }
        kfree(order);
        order = NULL;

        do {
                ww_acquire_init(&ctx, &ww_class);

                list_for_each_entry(ll, &locks, link) {
                        err = ww_mutex_lock(ll->lock, &ctx);
                        if (!err)
                                continue;

                        ln = ll;
                        list_for_each_entry_continue_reverse(ln, &locks, link)
                                ww_mutex_unlock(ln->lock);

                        if (err != -EDEADLK) {
                                pr_err_once("stress (%s) failed with %d\n",
                                            __func__, err);
                                break;
                        }

                        ww_mutex_lock_slow(ll->lock, &ctx);
                        list_move(&ll->link, &locks); /* restarts iteration */
                }

                dummy_load(stress);
                list_for_each_entry(ll, &locks, link)
                        ww_mutex_unlock(ll->lock);

                ww_acquire_fini(&ctx);
        } while (!time_after(jiffies, stress->timeout));

out:
        list_for_each_entry_safe(ll, ln, &locks, link)
                kfree(ll);
        kfree(order);
        kfree(stress);
}

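/*
 * Stress worker: hammer a single randomly chosen lock with plain
 * ww_mutex_lock()/unlock() cycles (no acquire context).
 */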
static void stress_one_work(struct work_struct *work)
{
        struct stress *stress = container_of(work, typeof(*stress), work);
        const int nlocks = stress->nlocks;
        struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
        int err;

        do {
                err = ww_mutex_lock(lock, NULL);
                if (!err) {
                        dummy_load(stress);
                        ww_mutex_unlock(lock);
                } else {
                        pr_err_once("stress (%s) failed with %d\n",
                                    __func__, err);
                        break;
                }
        } while (!time_after(jiffies, stress->timeout));

        kfree(stress);
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

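/*
 * Spawn nthreads stress workers, cycling through the work functions selected
 * by the STRESS_* flags, and let them run for two seconds on a shared array
 * of nlocks ww_mutexes.
 */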
static int stress(int nlocks, int nthreads, unsigned int flags)
{
        struct ww_mutex *locks;
        int n;

        locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
        if (!locks)
                return -ENOMEM;

        for (n = 0; n < nlocks; n++)
                ww_mutex_init(&locks[n], &ww_class);

        for (n = 0; nthreads; n++) {
                struct stress *stress;
                void (*fn)(struct work_struct *work);

                fn = NULL;
                switch (n & 3) {
                case 0:
                        if (flags & STRESS_INORDER)
                                fn = stress_inorder_work;
                        break;
                case 1:
                        if (flags & STRESS_REORDER)
                                fn = stress_reorder_work;
                        break;
                case 2:
                        if (flags & STRESS_ONE)
                                fn = stress_one_work;
                        break;
                }

                if (!fn)
                        continue;

                stress = kmalloc(sizeof(*stress), GFP_KERNEL);
                if (!stress)
                        break;

                INIT_WORK(&stress->work, fn);
                stress->locks = locks;
                stress->nlocks = nlocks;
                stress->timeout = jiffies + 2*HZ;

                queue_work(wq, &stress->work);
                nthreads--;
        }

        flush_workqueue(wq);

        for (n = 0; n < nlocks; n++)
                ww_mutex_destroy(&locks[n]);
        kfree(locks);

        return 0;
}

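/* Run the whole suite at module load time; any failure aborts the load. */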
static int __init test_ww_mutex_init(void)
{
        int ncpus = num_online_cpus();
        int ret;

        wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
        if (!wq)
                return -ENOMEM;

        ret = test_mutex();
        if (ret)
                return ret;

        ret = test_aa();
        if (ret)
                return ret;

        ret = test_abba(false);
        if (ret)
                return ret;

        ret = test_abba(true);
        if (ret)
                return ret;

        ret = test_cycle(ncpus);
        if (ret)
                return ret;

        ret = stress(16, 2*ncpus, STRESS_INORDER);
        if (ret)
                return ret;

        ret = stress(16, 2*ncpus, STRESS_REORDER);
        if (ret)
                return ret;

        ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
        if (ret)
                return ret;

        return 0;
}

static void __exit test_ww_mutex_exit(void)
{
        destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");