1 //==========================================================================
5 // Basic thread stress test
7 //==========================================================================
8 //####ECOSGPLCOPYRIGHTBEGIN####
9 // -------------------------------------------
10 // This file is part of eCos, the Embedded Configurable Operating System.
11 // Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
13 // eCos is free software; you can redistribute it and/or modify it under
14 // the terms of the GNU General Public License as published by the Free
15 // Software Foundation; either version 2 or (at your option) any later version.
17 // eCos is distributed in the hope that it will be useful, but WITHOUT ANY
18 // WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22 // You should have received a copy of the GNU General Public License along
23 // with eCos; if not, write to the Free Software Foundation, Inc.,
24 // 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
26 // As a special exception, if other files instantiate templates or use macros
27 // or inline functions from this file, or you compile this file and link it
28 // with other works to produce a work based on this file, this file does not
29 // by itself cause the resulting work to be covered by the GNU General Public
30 // License. However the source code for this file must still be made available
31 // in accordance with section (3) of the GNU General Public License.
33 // This exception does not invalidate any other reasons why a work based on
34 // this file might be covered by the GNU General Public License.
36 // Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
37 // at http://sources.redhat.com/ecos/ecos-license/
38 // -------------------------------------------
39 //####ECOSGPLCOPYRIGHTEND####
40 //==========================================================================
41 //#####DESCRIPTIONBEGIN####
44 // Contributors: rosalia, jskov
46 // Description: Very simple thread stress test, with some memory
47 // allocation and alarm handling.
50 // If client_makes_request is big, it means that there are made many more
51 // client requests than can be serviced. Consequently, clients are wasting
52 // CPU time and should be sleeping more.
54 // The list of handler invocations show how many threads are running
55 // at the same time. The more powerful the CPU, the more the numbers
57 //####DESCRIPTIONEND####
59 #include <pkgconf/system.h>
60 #include <cyg/infra/testcase.h>
62 #include <cyg/hal/hal_arch.h>
64 #if defined(CYGPKG_KERNEL) && defined(CYGPKG_IO) && defined(CYGPKG_ISOINFRA)
66 #include <pkgconf/kernel.h>
67 #include <pkgconf/isoinfra.h>
68 #include CYGHWR_MEMORY_LAYOUT_H
70 #if defined(CYGFUN_KERNEL_API_C)
72 #include <cyg/kernel/kapi.h>
74 #ifdef CYGINT_ISO_STDIO_FORMATTED_IO
80 #if defined(CYGPKG_LIBM)
85 #include <cyg/kernel/test/stackmon.h>
87 #if defined(CYGFUN_KERNEL_THREADS_TIMER)
/* if TIME_LIMIT is defined, it represents the number of seconds this
   test should last; if it is undefined the test will go forever */
#define DEATH_TIME_LIMIT 20
/* #undef DEATH_TIME_LIMIT */

// STACK_SIZE is typical +2kB for printf family calls which use big
// auto variables. Add more for handler which calls perform_stressful_tasks()
#define STACK_SIZE (2*1024 + CYGNUM_HAL_STACK_SIZE_TYPICAL)
#define STACK_SIZE_HANDLER (STACK_SIZE + 30*CYGNUM_HAL_STACK_FRAME_SIZE)

// If we have instrumentation enabled, make the execution time in the
// simulator even shorter than we were going to anyway.
#ifdef CYGPKG_KERNEL_INSTRUMENT
#define SIM_DELAY_DIVISOR 100
// NOTE(review): an #else presumably separates the two SIM_DELAY_DIVISOR
// definitions in the full file -- confirm against the original source.
#define SIM_DELAY_DIVISOR 10
//-----------------------------------------------------------------------
// Some targets need to define a smaller number of handlers due to
// memory restrictions.
#if defined(CYGMEM_REGION_ram_SIZE) && (CYGMEM_REGION_ram_SIZE < 0x80000)
#define MAX_HANDLERS 4
#define N_LISTENERS 1
// Small-RAM targets also get smaller stacks with tighter margins.
// NOTE(review): a matching #undef STACK_SIZE presumably precedes the
// redefinition below in the full file -- confirm against the original.
#undef STACK_SIZE_HANDLER
#define STACK_SIZE (1024 + CYGNUM_HAL_STACK_SIZE_TYPICAL)
#define STACK_SIZE_HANDLER (STACK_SIZE + 10*CYGNUM_HAL_STACK_FRAME_SIZE)

//-----------------------------------------------------------------------
// If no target specific definitions, use defaults
// NOTE(review): these defaults are presumably inside the #else branch of
// the CYGMEM_REGION_ram_SIZE test above -- confirm against the original.
#define MAX_HANDLERS 19
#define N_LISTENERS 4
/* Allocate priorities in this order. This ensures that handlers
   (which are the ones using the CPU) get enough CPU time to actually
   complete their tasks.

   The empty space ensures that if libc main() thread should happen to
   be in the priority range of the handlers, no handlers are
   accidently reduced so much in priority to get below
   listeners/clients. */

/* Priority bands, low numbers = high priority:
   main_program < handlers < (empty gap) < listeners < clients */
#define P_MAIN_PROGRAM 1
#define P_MAIN_PROGRAM_E (P_MAIN_PROGRAM+N_MAIN)

#define P_BASE_HANDLER (P_MAIN_PROGRAM_E)
#define P_BASE_HANDLER_E (P_BASE_HANDLER+MAX_HANDLERS)

/* Two-slot unused gap between handlers and listeners (see comment above). */
#define P_BASE_EMPTY (P_BASE_HANDLER_E)
#define P_BASE_EMPTY_E (P_BASE_EMPTY+2)

#define P_BASE_LISTENER (P_BASE_EMPTY_E)
#define P_BASE_LISTENER_E (P_BASE_LISTENER+N_LISTENERS)

#define P_BASE_CLIENT (P_BASE_LISTENER_E)
#define P_BASE_CLIENT_E (P_BASE_CLIENT+N_CLIENTS)

/* Total number of priority slots the test requires. */
#define P_MAX (P_BASE_CLIENT_E)

/* Ensure there's room for what we request */
#if (CYGNUM_KERNEL_SCHED_PRIORITIES >= P_MAX)

/* if we use the bitmap scheduler we must make sure we don't use the
   same priority more than once, so we must store those already in use */
static volatile char priority_in_use[P_MAX];

/* We may not get the priority we ask for (scheduler may decide to ignore
   schedule hint). So keep a table of priorities actually assigned to
   the threads. This information may come in handy for debugging - it's
   not actively used by the code. */
static volatile int priority_translation[P_MAX];
/* now declare (and allocate space for) some kernel objects, like the
   threads we will use */
cyg_thread main_thread_s;
cyg_thread handler_thread_s[MAX_HANDLERS];
cyg_thread listener_thread_s[N_LISTENERS];
cyg_thread client_thread_s[N_CLIENTS];

/* space for stacks for all threads */
char main_stack[STACK_SIZE];
char handler_stack[MAX_HANDLERS][STACK_SIZE_HANDLER];
char listener_stack[N_LISTENERS][STACK_SIZE];
char client_stack[N_CLIENTS][STACK_SIZE];

/* now the handles for the threads */
cyg_handle_t handlerH[MAX_HANDLERS];
cyg_handle_t listenerH[N_LISTENERS];
cyg_handle_t clientH[N_CLIENTS];

/* space for thread names (one 20-char slot per priority slot) */
char thread_name[P_MAX][20];

/* and now variables for the procedure which is the thread */
/* NOTE(review): the declaration list continues on a line not visible
   here (presumably ending with handler_program;) -- confirm. */
cyg_thread_entry_t main_program, client_program, listener_program,

/* a few mutexes used in the code */
/* NOTE(review): this declaration list also continues on a line not
   visible here (presumably ending with free_handler_lock;) -- confirm. */
cyg_mutex_t client_request_lock, handler_slot_lock, statistics_print_lock,

/* global variables with which the handler IDs and thread priorities
   to free are communicated from handlers to main_program. Access to
   these are protected by free_handler_lock. An id of -1 means the
   that the variables are empty. */
volatile int free_handler_pri = 0;
volatile int free_handler_id = -1;

/* a global variable with which the client and server coordinate */
volatile int client_makes_request = 0;

/* if this is true, clients will not make requests */
volatile int clients_paused = 0;

/* indicates that it's time to print out a report */
volatile int time_to_report = 0;
/* print status after a delay of this many secs. */
int time_report_delay;

/*** now application-specific variables ***/
/* an array that stores whether the handler threads are in use */
volatile int handler_thread_in_use[MAX_HANDLERS];
/* total count of active handlers */
volatile int handler_thread_in_use_count;

/***** statistics-gathering variables *****/
struct s_statistics {
    /* store the number of times each handler has been invoked */
    unsigned long handler_invocation_histogram[MAX_HANDLERS];
    /* store how many times malloc has been attempted and how many times
       it failed (count of NULL returns) */
    unsigned long malloc_tries, malloc_failures;
    /* how many threads have been created */
    unsigned long thread_creations, thread_exits;

struct s_statistics statistics;
/* some function prototypes; those with the sc_ prefix are
   "statistics-collecting" versions of the cyg_ primitives */
cyg_addrword_t sc_thread_create(
    cyg_addrword_t sched_info,          /* scheduling info (eg pri) */
    cyg_thread_entry_t *entry,          /* entry point function */
    cyg_addrword_t entry_data,          /* entry data */
    char *name,                         /* optional thread name */
    void *stack_base,                   /* stack base, NULL = alloc */
    cyg_ucount32 stack_size,            /* stack size, 0 = default */
    cyg_handle_t *handle,               /* returned thread handle */
    cyg_thread *thread                  /* put thread here */

void start_handler(void);
void stop_handler(int handler_id, int handler_pri);
void perform_stressful_tasks(void);
void permute_array(char a[], int size, int seed);
void setup_death_alarm(cyg_addrword_t data, cyg_handle_t *deathHp,
                       cyg_alarm *death_alarm_p, int *killed_p);
void print_statistics(int print_full);

/* we need to declare the alarm handling function (which is defined
   below), so that we can pass it to cyg_alarm_initialize() */
cyg_alarm_t report_alarm_func, death_alarm_func;

/* handle and alarm for the report alarm */
cyg_handle_t report_alarmH, counterH, system_clockH;
cyg_alarm report_alarm;
/* main launches all the threads of the test */
/* NOTE(review): the function header for this startup routine is not
   visible in this view; the statements below are its body. */
    CYG_TEST_INFO("Stress threads test compiled on " __DATE__);

    /* the mutexes must be initialized before any thread can touch them */
    cyg_mutex_init(&client_request_lock);
    cyg_mutex_init(&statistics_print_lock);
    cyg_mutex_init(&free_handler_lock);

    /* initialize statistics */
    memset(&statistics, 0, sizeof(statistics));

    /* clear priority table */
    for (i = 0; i < sizeof(priority_in_use); i++)
        priority_in_use[i] = 0;

    /* initialize main thread */
    priority_translation[P_MAIN_PROGRAM] =
        sc_thread_create(P_MAIN_PROGRAM, main_program, (cyg_addrword_t) 0,
                         "main_program", (void *) main_stack, STACK_SIZE,
                         &mainH, &main_thread_s);
    priority_in_use[P_MAIN_PROGRAM]++;

    /* initialize all handler threads to not be in use */
    for (i = 0; i < MAX_HANDLERS; ++i) {
        handler_thread_in_use[i] = 0;

    handler_thread_in_use_count = 0;

    /* create the listener threads; handlers are created on demand later */
    for (i = 0; i < N_LISTENERS; ++i) {
        int prio = P_BASE_LISTENER + i;
        char* name = &thread_name[prio][0];
        sprintf(name, "listener-%02d", i);
        priority_translation[prio] =
            sc_thread_create(prio, listener_program, (cyg_addrword_t) i,
                             name, (void *) listener_stack[i], STACK_SIZE,
                             &listenerH[i], &listener_thread_s[i]);
        CYG_ASSERT(0 == priority_in_use[prio], "Priority already in use!");
        priority_in_use[prio]++;

    /* create the client threads */
    for (i = 0; i < N_CLIENTS; ++i) {
        int prio = P_BASE_CLIENT + i;
        char* name = &thread_name[prio][0];
        sprintf(name, "client-%02d", i);
        priority_translation[prio] =
            sc_thread_create(prio, client_program, (cyg_addrword_t) i,
                             name, (void *) client_stack[i], STACK_SIZE,
                             &(clientH[i]), &client_thread_s[i]);
        CYG_ASSERT(0 == priority_in_use[prio], "Priority already in use!");
        priority_in_use[prio]++;

    /* threads are created suspended; start them all */
    cyg_thread_resume(mainH);
    for (i = 0; i < N_CLIENTS; ++i) {
        cyg_thread_resume(clientH[i]);

    for (i = 0; i < N_LISTENERS; ++i) {
        cyg_thread_resume(listenerH[i]);

    /* set up the alarm which gives periodic wakeups to say "time to
       report" (flag polled by main_program) */
    system_clockH = cyg_real_time_clock();
    cyg_clock_to_counter(system_clockH, &counterH);

    cyg_alarm_create(counterH, report_alarm_func,
                     (cyg_addrword_t) 4000,
                     &report_alarmH, &report_alarm);

    /* report much more often in the simulator so test runs stay short */
    if (cyg_test_is_simulator) {
        time_report_delay = 2;

        time_report_delay = 60;

    /* first report after 2s (200 ticks); repeat every time_report_delay s */
    cyg_alarm_initialize(report_alarmH, cyg_current_time()+200,
                         time_report_delay*100);
/* main_program() -- frees resources and prints status.
   Runs at the highest priority; loops forever reaping finished handler
   threads (ids posted via free_handler_id/free_handler_pri under
   free_handler_lock) and, when time_to_report/DEATH_TIME_LIMIT fires,
   pauses the clients, waits for all handlers to drain, then prints
   statistics or finishes the test. */
void main_program(cyg_addrword_t data)
#ifdef DEATH_TIME_LIMIT
    cyg_alarm death_alarm;

    /* arm a one-shot alarm that sets is_dead after DEATH_TIME_LIMIT secs */
    setup_death_alarm(0, &deathH, &death_alarm, &is_dead);
#endif /* DEATH_TIME_LIMIT */

        cyg_mutex_lock(&free_handler_lock); {
            // If any handler has left its ID, copy the ID and
            // priority values to local variables, and free up the
            // global communication variables again.
            if (-1 != free_handler_id) {
                handler_id = free_handler_id;
                handler_pri = free_handler_pri;
                free_handler_id = -1;

        } cyg_mutex_unlock(&free_handler_lock);

        // Reap the handler outside the lock: stop_handler may sleep.
        if (-1 != handler_id) {
            stop_handler(handler_id, handler_pri);

        // If it's time to report status or quit, set pause flag and
        // keep looping until all handlers have stopped.
        if (time_to_report) {
            cyg_mutex_lock(&client_request_lock); {
            } cyg_mutex_unlock(&client_request_lock);

            // When all handlers have stopped, we can print statistics
            // knowing that all (handler allocated) resources should have
            // been freed. That is, we should be able to determine leaks.
            if (0 == handler_thread_in_use_count) {

                // We've done the printing now. Resume the system.
                cyg_mutex_lock(&client_request_lock); {
                } cyg_mutex_unlock(&client_request_lock);

#ifdef DEATH_TIME_LIMIT
        // Stop test if time.
            cyg_mutex_lock(&client_request_lock); {
            } cyg_mutex_unlock(&client_request_lock);

            // When all handlers have stopped, we can print statistics
            // knowing that all (handler allocated) resources should have
            // been freed. That is, we should be able to determine leaks.
            if (0 == handler_thread_in_use_count) {
                CYG_TEST_PASS_FINISH("Kernel thread stress test OK");
#endif /* DEATH_TIME_LIMIT */
/* client_program() -- an obnoxious client which makes a lot of requests.
   Each iteration bumps the shared client_makes_request counter (unless
   clients_paused is set) and then sleeps 10..29 ticks. */
void client_program(cyg_addrword_t data)
    system_clockH = cyg_real_time_clock();
    cyg_clock_to_counter(system_clockH, &counterH);

        /* randomize the request period a little */
        delay = (rand() % 20);

        /* now send a request to the server */
        cyg_mutex_lock(&client_request_lock); {
            if (0 == clients_paused)
                client_makes_request++;
        } cyg_mutex_unlock(&client_request_lock);

        cyg_thread_delay(10+delay);
/* listener_program() -- listens for a request and spawns a handler to
   take care of the request. Consumes one pending request per iteration
   (under client_request_lock) and then sleeps 2..11 ticks. */
void listener_program(cyg_addrword_t data)
        int make_request = 0;

        cyg_mutex_lock(&client_request_lock); {
            if (client_makes_request > 0) {
                --client_makes_request;
        } cyg_mutex_unlock(&client_request_lock);

        cyg_thread_delay(2 + (rand() % 10));
/* handler_program() -- is spawned to handle each incoming request.
   'data' is the handler slot index; on completion the slot index and
   its priority are posted to main_program via the free_handler_*
   variables so the thread can be reaped. */
void handler_program(cyg_addrword_t data)
    /* here is where we perform specific stressful tasks */
    perform_stressful_tasks();

    /* sleep a pseudo-random, log-distributed number of ticks
       (exercises libm: log/fabs) */
    cyg_thread_delay(4 + (int) (0.5*log(1.0 + fabs((rand() % 1000000)))));

    // Loop until the handler id and priority can be communicated to
    // main_program (the mailbox variables may be occupied by another
    // finished handler).
        cyg_mutex_lock(&free_handler_lock); {
            if (-1 == free_handler_id) {
                free_handler_id = data;
                free_handler_pri = P_BASE_HANDLER+(int) data;
        } cyg_mutex_unlock(&free_handler_lock);
/* start a new handler: find a free handler slot (under
   handler_slot_lock), mark it and its priority in use, then create and
   resume a handler thread in that slot. */
void start_handler(void)
    int handler_slot = 0;

    cyg_mutex_lock(&handler_slot_lock); {
        for (handler_slot = 0; handler_slot < MAX_HANDLERS;++handler_slot){
            if (!handler_thread_in_use[handler_slot]) {
                handler_thread_in_use[handler_slot]++;
                handler_thread_in_use_count++;
    } cyg_mutex_unlock(&handler_slot_lock);

    CYG_ASSERT(1 == handler_thread_in_use[handler_slot],
               "Handler usage count wrong!");

    /* each handler slot maps to a fixed priority */
    prio = P_BASE_HANDLER+handler_slot;
    CYG_ASSERT(0 == priority_in_use[prio], "Priority already in use!");
    priority_in_use[prio]++;

    name = &thread_name[prio][0];
    sprintf(name, "handler-%02d/%02d", handler_slot, prio);

    priority_translation[prio] =
        sc_thread_create(prio, handler_program,
                         (cyg_addrword_t) handler_slot,
                         name, (void *) handler_stack[handler_slot],
                         STACK_SIZE_HANDLER, &handlerH[handler_slot],
                         &handler_thread_s[handler_slot]);
    cyg_thread_resume(handlerH[handler_slot]);
    ++statistics.handler_invocation_histogram[handler_slot];
/* free a locked handler thread: delete the thread (retrying until the
   kernel reports success) and release its slot and priority bookkeeping. */
void stop_handler(int handler_id, int handler_pri)
    // Finally delete the handler thread. This must be done in a
    // loop, waiting for the call to return true. If it returns
    // false, go to sleep for a bit, so the killed thread gets a
    // chance to run and complete its business.
    while (!cyg_thread_delete(handlerH[handler_id])) {

    ++statistics.thread_exits;

    // Free the handler resources.
    cyg_mutex_lock(&handler_slot_lock); {
        handler_thread_in_use[handler_id]--;
        handler_thread_in_use_count--;
        priority_in_use[handler_pri]--;
        CYG_ASSERT(0 == priority_in_use[handler_pri],
                   "Priority not in use!");
        CYG_ASSERT(0 == handler_thread_in_use[handler_id],
                   "Handler not in use!");
        CYG_ASSERT(0 <= handler_thread_in_use_count,
                   "Stopped more handlers than was started!");
    } cyg_mutex_unlock(&handler_slot_lock);
/* do things which will stress the system: a malloc/verify/free cycle
   over MAX_MALLOCED_SPACES chunks of increasing size, followed by
   create/alloc/delete of ten fixed-size memory pools. */
void perform_stressful_tasks()
#define MAX_MALLOCED_SPACES 100          /* do this many mallocs at most */
#define MALLOCED_BASE_SIZE 1             /* basic size in bytes */
    char *spaces[MAX_MALLOCED_SPACES];
    int sizes[MAX_MALLOCED_SPACES];
    unsigned int i, j, size;

    cyg_uint8 pool_space[10][100];
    cyg_handle_t mempool_handles[10];
    cyg_mempool_fix mempool_objects[10];

    /* here I use malloc, which uses the kernel's variable memory pools.
       note that malloc/free is a bit simple-minded here: it does not
       try to really fragment things, and it does not try to make the
       allocation/deallocation concurrent with other thread execution
       (although I'm about to throw in a yield()) */
    for (i = 0; i < MAX_MALLOCED_SPACES; ++i) {
        ++statistics.malloc_tries;
        /* sizes grow linearly: 1, 3, 5, ... * MALLOCED_BASE_SIZE */
        size = (i*2+1)*MALLOCED_BASE_SIZE;
        spaces[i] = (char *) malloc(size);

        if (spaces[i] != NULL) {
            // Fill with a known value (differs between chunk).
            for (j = 0; j < size; ++j) {
                spaces[i][j] = 0x50 | ((j+i) & 0x0f);

        /* periodically yield / sleep to interleave with other threads */
        if (i % (MAX_MALLOCED_SPACES/10) == 0) {

        if (i % (MAX_MALLOCED_SPACES/15) == 0) {
            cyg_thread_delay(i % 5);

    /* now free it all up */
    for (i = 0; i < MAX_MALLOCED_SPACES; ++i) {
        if (spaces[i] != NULL) {
            for (j = 0; j < size; ++j) {
                // Validate chunk data.
                if ((0x50 | ((j+i) & 0x0f)) != spaces[i][j]) {
                    printf("Bad byte in chunk\n");

                spaces[i][j] = 0xAA;     /* write a bit pattern */

            /* NOTE(review): chunk presumably counted as a failure when
               spaces[i] was NULL -- surrounding lines not visible here */
            ++statistics.malloc_failures;

    /* now allocate and then free some fixed-size memory pools; for
       now this is simple-minded because it does not have many threads
       sharing the memory pools and racing for memory. */
    for (i = 0; i < 10; ++i) {
        cyg_mempool_fix_create(pool_space[i], 100, (i+1)*3,
                               &mempool_handles[i], &mempool_objects[i]);

    for (i = 0; i < 10; ++i) {
        spaces[i] = cyg_mempool_fix_try_alloc(mempool_handles[i]);

    for (i = 0; i < 10; ++i) {
        cyg_mempool_fix_delete(mempool_handles[i]);
/* report_alarm_func() is invoked as an alarm handler, so it should be
   quick and simple. in this case it sets a global flag which is
   checked by main_program. */
void report_alarm_func(cyg_handle_t alarmH, cyg_addrword_t data)
#ifdef DEATH_TIME_LIMIT
/* this sets up death alarms. it gets the handle and alarm from the
   caller, since they must persist for the life of the alarm.
   killed_p is passed as the alarm's data word so death_alarm_func can
   set the caller's flag when the alarm fires. */
void setup_death_alarm(cyg_addrword_t data, cyg_handle_t *deathHp,
                       cyg_alarm *death_alarm_p, int *killed_p)
    cyg_handle_t system_clockH, counterH;
    cyg_resolution_t rtc_res;

    system_clockH = cyg_real_time_clock();
    cyg_clock_to_counter(system_clockH, &counterH);

    cyg_alarm_create(counterH, death_alarm_func,
                     (cyg_addrword_t) killed_p,
                     deathHp, death_alarm_p);
    rtc_res = cyg_clock_get_resolution(system_clockH);

        cyg_tick_count_t tick_delay;
        /* convert DEATH_TIME_LIMIT seconds into clock ticks using the
           clock resolution (dividend/divisor in ns per tick) */
        tick_delay = (long long)
            ((1000000000.0*rtc_res.divisor)
             *((double)DEATH_TIME_LIMIT)/((double)rtc_res.dividend));
        if ( cyg_test_is_simulator )
            tick_delay /= SIM_DELAY_DIVISOR;
#ifdef CYGPKG_HAL_SYNTH
        // 20 seconds is a long time compared to the run time of other tests.
        // Reduce to 10 seconds, allowing more tests to get run.

        /* one-shot alarm: interval of 0 means it fires exactly once */
        cyg_alarm_initialize(*deathHp, cyg_current_time() + tick_delay, 0);
/* death_alarm_func() is the alarm handler that kills the current
   thread after a specified timeout. It does so by setting a flag the
   thread is constantly checking. The flag's address arrives in 'data'
   (set up by setup_death_alarm). */
void death_alarm_func(cyg_handle_t alarmH, cyg_addrword_t data)
    killed_p = (int *) data;
/* now I write the sc_ versions of the cyg_functions */
/* sc_thread_create(): wrapper around cyg_thread_create() that counts
   thread creations in the statistics and returns the priority the
   scheduler actually assigned to the new thread (which may differ
   from the requested sched_info hint). */
cyg_addrword_t sc_thread_create(
    cyg_addrword_t sched_info,          /* scheduling info (eg pri) */
    cyg_thread_entry_t *entry,          /* entry point function */
    cyg_addrword_t entry_data,          /* entry data */
    char *name,                         /* optional thread name */
    void *stack_base,                   /* stack base, NULL = alloc */
    cyg_ucount32 stack_size,            /* stack size, 0 = default */
    cyg_handle_t *handle,               /* returned thread handle */
    cyg_thread *thread                  /* put thread here */

    ++statistics.thread_creations;

    cyg_thread_create(sched_info, entry, entry_data, name,
                      stack_base, stack_size, handle, thread);

    return cyg_thread_get_priority(*handle);
#define MINS_HOUR (60)
#define MINS_DAY  (60*24)

/* print_statistics() -- dump test statistics and stack usage.
   Rate-limits itself: after the first hour only dumps hourly, after the
   first day only daily (printing '.' as a heartbeat otherwise). Also
   halves the counters when they approach overflow, tracking the number
   of halvings in shift_count. */
void print_statistics(int print_full)
    static int stat_dumps = 0;           /* reports requested so far */
    static int print_count = 0;          /* full dumps actually printed */
    static int shift_count = 0;          /* times counters were halved */

    // Find number of minutes.
    minutes = time_report_delay*stat_dumps / 60;

    // Return if time/minutes not integer.
    if ((time_report_delay*stat_dumps % 60) != 0)

    // After the first day, only dump stat once per day. Do print
    // a . on the hour though.
    if ((minutes > MINS_DAY) && ((minutes % MINS_DAY) != 0)) {
        if ((minutes % MINS_HOUR) == 0) {

    // After the first hour of the first day, only dump stat once
    // per hour. Do print . each minute though.
    if ((minutes < MINS_DAY) && (minutes > MINS_HOUR)
        && ((minutes % MINS_HOUR) != 0)) {

    printf("\nState dump %d (%d hours, %d minutes) [numbers >>%d]\n",
           ++print_count, minutes / MINS_HOUR, minutes % MINS_HOUR,

    cyg_mutex_lock(&statistics_print_lock); {
        //--------------------------------
        // Information private to this test:
        printf(" Handler-invocations: ");
        for (i = 0; i < MAX_HANDLERS; ++i) {
            printf("%4lu ", statistics.handler_invocation_histogram[i]);

        printf(" malloc()-tries/failures: -- %7lu %7lu\n",
               statistics.malloc_tries, statistics.malloc_failures);
        printf(" client_makes_request: %d\n", client_makes_request);

        // Check for big numbers and reduce if getting close to overflow
        if (statistics.malloc_tries > 0x40000000) {
            for (i = 0; i < MAX_HANDLERS; ++i) {
                statistics.handler_invocation_histogram[i] >>= 1;

            statistics.malloc_tries >>= 1;
            statistics.malloc_failures >>= 1;

    } cyg_mutex_unlock(&statistics_print_lock);

#if CYGINT_ISO_MALLINFO
    //--------------------------------
    // System information
        struct mallinfo mem_info;

        mem_info = mallinfo();

        printf(" Memory system: Total=0x%08x Free=0x%08x Max=0x%08x\n",
               mem_info.arena, mem_info.fordblks, mem_info.maxfree);

    /* per-thread stack high-water marks via the test stackmon helpers */
    printf(" Stack usage:\n");
    cyg_test_dump_interrupt_stack_stats( "  Interrupt" );
    cyg_test_dump_idlethread_stack_stats( "  Idle" );

    cyg_test_dump_stack_stats("  Main", main_stack,
                              main_stack + sizeof(main_stack));
    for (i = 0; i < MAX_HANDLERS; i++) {
        cyg_test_dump_stack_stats("  Handler", handler_stack[i],
                                  handler_stack[i] + sizeof(handler_stack[i]));

    for (i = 0; i < N_LISTENERS; i++) {
        cyg_test_dump_stack_stats("  Listener", listener_stack[i],
                                  listener_stack[i] + sizeof(listener_stack[i]));

    for (i = 0; i < N_CLIENTS; i++) {
        cyg_test_dump_stack_stats("  Client", client_stack[i],
                                  client_stack[i] + sizeof(client_stack[i]));
/* Fallback chain: each unmet configuration requirement defines N_A_MSG
   so the stub at the bottom reports the test as not applicable. */
#else  /* (CYGNUM_KERNEL_SCHED_PRIORITIES >= */
       /* (N_MAIN+N_CLIENTS+N_LISTENERS+MAX_HANDLERS)) */
#define N_A_MSG "not enough priorities available"
#endif /* (CYGNUM_KERNEL_SCHED_PRIORITIES >= */
       /* (N_MAIN+N_CLIENTS+N_LISTENERS+MAX_HANDLERS)) */

#else  /* CYGINT_ISO_MALLOC */
# define N_A_MSG "this test needs malloc"
#endif /* CYGINT_ISO_MALLOC */

#else  /* CYGFUN_KERNEL_THREADS_TIMER */
# define N_A_MSG "this test needs kernel threads timer"
#endif /* CYGFUN_KERNEL_THREADS_TIMER */

#else  /* CYGPKG_LIBM */
# define N_A_MSG "this test needs libm"
#endif /* CYGPKG_LIBM */

#else  /* CYGINT_ISO_STDIO_FORMATTED_IO */
# define N_A_MSG "this test needs stdio formatted I/O"
#endif /* CYGINT_ISO_STDIO_FORMATTED_IO */

#else  // def CYGFUN_KERNEL_API_C
# define N_A_MSG "this test needs Kernel C API"

#else  // def CYGPKG_KERNEL && CYGPKG_IO && CYGPKG_ISOINFRA
# define N_A_MSG "this tests needs Kernel, isoinfra and IO"

/* report the reason this test cannot run on the current configuration */
    CYG_TEST_NA( N_A_MSG);