1 //==========================================================================
5 //==========================================================================
6 //####BSDCOPYRIGHTBEGIN####
8 // -------------------------------------------
10 // Portions of this software may have been derived from OpenBSD,
11 // FreeBSD or other sources, and are covered by the appropriate
12 // copyright disclaimers included herein.
14 // Portions created by Red Hat are
15 // Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
17 // -------------------------------------------
19 //####BSDCOPYRIGHTEND####
20 //==========================================================================
22 //==========================================================================
26 // eCos wrapper and synch functions
28 //==========================================================================
29 //####BSDCOPYRIGHTBEGIN####
31 // -------------------------------------------
33 // Portions of this software may have been derived from OpenBSD or other sources,
34 // and are covered by the appropriate copyright disclaimers included herein.
36 // -------------------------------------------
38 //####BSDCOPYRIGHTEND####
39 //==========================================================================
40 //#####DESCRIPTIONBEGIN####
42 // Author(s): gthomas, hmt
43 // Contributors: gthomas, hmt
49 //####DESCRIPTIONEND####
51 //==========================================================================
54 // Synch routines, etc., used by network code
56 #include <sys/param.h>
57 #include <pkgconf/net.h>
59 #include <cyg/infra/diag.h>
60 #include <cyg/hal/hal_intr.h>
61 #include <cyg/kernel/kapi.h>
63 #include <cyg/infra/cyg_ass.h>
65 //---------------------------- splx() emulation ------------------------------
66 // This contains both the SPLX stuff and tsleep/wakeup - because those must
67 // be SPLX aware. They release the SPLX lock when sleeping, and reclaim it
68 // (if needs be) at wakeup.
70 // The variable spl_state (and the associated bit patterns) is used to keep
71 // track of the "splx()" level. This is an artifact of the original stack,
72 // based on the BSD interrupt world (interrupts and processing could be
73 // masked based on a level value, supported by hardware). This is not very
74 // real-time, so the emulation uses proper eCos tools and techniques to
75 // accomplish the same result. The key here is in the analysis of the
76 // various "levels", why they are used, etc.
78 // SPL_IMP is called in order to protect internal data structures
79 // short-term, primarily so that interrupt processing does not interfere
82 // SPL_CLOCK is called in order to ensure that a timestamp is valid i.e. no
83 // time passes while the stamp is being taken (since it is a potentially
84 // non-idempotent data structure).
86 // SPL_SOFTNET is used to prevent all other stack processing, including
87 // interrupts (DSRs), etc.
89 // SPL_INTERNAL is used when running the pseudo-DSR in timeout.c - this
90 // runs what should really be the network interface device's DSR, and any
91 // timeout routines that are scheduled. (They are broken out into a thread
92 // to isolate the network locking from the rest of the system)
94 // NB a thread in this state can tsleep(); see below. Tsleep releases and
95 // reclaims the locks and so on. This is necessary because of the possible
99 // He runs, he is lower priority
101 // He or something else awakens me
102 // I want to run, but he has splsoft, so I wait
103 // He runs and releases splsoft
// State for the splx() emulation: a bitmask of the "levels" currently
// claimed, plus the single mutex (and its owning thread) that serialises
// all stack processing.
106 static volatile cyg_uint32 spl_state = 0;
// Bit values for spl_state.  NOTE(review): the SPL_IMP / SPL_NET bit
// definitions are not visible in this chunk — presumably 0x01/0x02;
// confirm against the full file.
109 #define SPL_CLOCK 0x04
110 #define SPL_SOFTNET 0x08
111 #define SPL_INTERNAL 0x10
// The lock that emulates all spl levels, and the thread handle (if any)
// that currently owns it (0 when free).
113 static cyg_mutex_t splx_mutex;
114 static volatile cyg_handle_t splx_thread;
// Optional call-site tracing: when enabled, each cyg_spl*() entry point
// receives the caller's file/line and records a trace event via
// do_sched_event() (defined at the bottom of this file).
117 #ifdef CYGIMPL_TRACE_SPLX
118 #define SPLXARGS const char *file, const int line
119 #define SPLXMOREARGS , const char *file, const int line
120 #define SPLXTRACE do_sched_event(__FUNCTION__, file, line, spl_state)
// Non-tracing variant.  NOTE(review): the #else line introducing this
// branch is not visible in this chunk.
122 #define SPLXARGS void
// Common implementation behind every cyg_spl*() level-raising call: if
// the calling thread does not already own splx_mutex, block until the
// mutex is acquired and take ownership; then record the requested level
// bit(s) in spl_state.  The returned value is handed back to cyg_splx()
// later to restore the previous state.
// NOTE(review): the function's braces, the busy-wait body, the update of
// spl_state with 'which' and the final return statement are not visible
// in this chunk — confirm against the full file.
128 static inline cyg_uint32
129 spl_any( cyg_uint32 which )
131 cyg_uint32 old_spl = spl_state;
// Not the current owner: wait for the mutex, then claim ownership.
132 if ( cyg_thread_self() != splx_thread ) {
133 while ( !cyg_mutex_lock( &splx_mutex ) )
// A fresh owner starts with no levels held, so the caller's restore
// value is "free everything".
135 old_spl = 0; // Free when we unlock this context
136 CYG_ASSERT( 0 == splx_thread, "Thread still owned" );
137 CYG_ASSERT( 0 == spl_state, "spl still set" );
138 splx_thread = cyg_thread_self();
// Sanity: by this point the mutex must be held, and held by us.
140 CYG_ASSERT( splx_mutex.locked, "spl_any: mutex not locked" );
141 CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == cyg_thread_self(),
142 "spl_any: mutex not mine" );
// Level-raising entry points; each delegates to spl_any() with its bit.
// NOTE(review): the return-type lines, function headers/braces and any
// SPLXTRACE calls for these functions are not visible in this chunk.
152 return spl_any( SPL_IMP );
// Prevent time passing while a timestamp is taken (see file header).
156 cyg_splclock(SPLXARGS)
159 return spl_any( SPL_CLOCK );
166 return spl_any( SPL_NET );
// splhigh maps onto SPL_SOFTNET rather than a separate, higher level.
170 cyg_splhigh(SPLXARGS)
173 // splhigh did SPLSOFTNET in the contrib, so this is the same
174 return spl_any( SPL_SOFTNET );
// Lock out all other stack processing, including interrupts (DSRs).
178 cyg_splsoftnet(SPLXARGS)
181 return spl_any( SPL_SOFTNET );
// Used by the pseudo-DSR thread in timeout.c (see file header).
185 cyg_splinternal(SPLXARGS)
188 return spl_any( SPL_INTERNAL );
193 // Return to a previous interrupt state/level.
// 'old_state' is the value returned by the matching cyg_spl*() call;
// ANDing it back in drops only the bits claimed since then.
// NOTE(review): the return-type line, braces, and the statement clearing
// splx_thread before the unlock are not visible in this chunk.
196 cyg_splx(cyg_uint32 old_state SPLXMOREARGS)
// Must only be called while a level is held, by the owning thread.
200 CYG_ASSERT( 0 != spl_state, "No state set" );
201 CYG_ASSERT( splx_mutex.locked, "splx: mutex not locked" );
202 CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == cyg_thread_self(),
203 "splx: mutex not mine" );
// Drop the bits claimed since old_state was recorded by spl_any().
205 spl_state &= old_state;
// Once no levels remain held, release ownership of the lock so other
// threads may enter the stack.
207 if ( 0 == spl_state ) {
209 cyg_mutex_unlock( &splx_mutex );
213 //------------------ tsleep() and wakeup() emulation ---------------------------
215 // Structure used to keep track of 'tsleep' style events
// One slot per concurrent tsleep() sleeper; 'chan' identifies the event
// being waited for and a per-slot semaphore delivers the wakeup.
// NOTE(review): the struct members (chan, sem, ...) are not visible in
// this chunk — confirm against the full file.
217 struct wakeup_event {
221 static struct wakeup_event wakeup_list[CYGPKG_NET_NUM_WAKEUP_EVENTS];
224 // Called to initialize structures used by timeout functions
// One-time setup: give every wakeup slot a zero-count semaphore and
// initialise the splx mutex.
// NOTE(review): the function's return-type line, braces, the declaration
// of 'i', the loop-body close and any splx_thread initialisation are not
// visible in this chunk.
226 cyg_tsleep_init(void)
229 struct wakeup_event *ev;
230 // Create list of "wakeup event" semaphores
231 for (i = 0, ev = wakeup_list; i < CYGPKG_NET_NUM_WAKEUP_EVENTS; i++, ev++) {
233 cyg_semaphore_init(&ev->sem, 0);
235 // Initialize the mutex and thread id:
236 cyg_mutex_init( &splx_mutex );
// Wake every sleeper currently registered on 'chan' (broadcast).
// NOTE(review): the function's return-type line, braces and the local
// declaration of 'i' are not visible in this chunk.
244 cyg_wakeup(void *chan)
247 struct wakeup_event *ev;
248 cyg_scheduler_lock(); // Ensure scan is safe
249 // NB this is broadcast semantics because a sleeper/wakee holds the
250 // slot until they exit. This avoids a race condition whereby the
251 // semaphore can get an extra post - and then the slot is freed, so the
252 // sem wait returns immediately, AOK, so the slot wasn't freed.
// Post every slot waiting on this channel; the sleeper frees its own
// slot on exit from cyg_tsleep().
253 for (i = 0, ev = wakeup_list; i < CYGPKG_NET_NUM_WAKEUP_EVENTS; i++, ev++)
254 if (ev->chan == chan)
255 cyg_semaphore_post(&ev->sem);
257 cyg_scheduler_unlock();
260 // ------------------------------------------------------------------------
261 // Wait for an event with timeout
262 // tsleep(event, priority, state, timeout)
263 // event - the thing to wait for
265 // state - a descriptive message
266 // timeout - max time (in ticks) to wait
268 // 0 - event was "signalled"
269 // ETIMEDOUT - timeout occurred
270 // EINTR - thread broken out of sleep
// Sleep on 'chan' until cyg_wakeup(chan) or until 'timo' ticks pass.
// If the caller holds the splx lock, it is released for the duration of
// the sleep and reclaimed (with the same spl_state bits) before return.
// 'pri' and 'wmesg' are accepted for BSD compatibility; this chunk shows
// no use of them.  Returns 0 / ETIMEDOUT / EINTR per the comment block
// above.
// NOTE(review): the function's return-type line, braces, the declaration
// of 'i', the slot-claiming loop body, the return statements and the
// splx_thread clear before the unlock are not visible in this chunk.
273 cyg_tsleep(void *chan, int pri, char *wmesg, int timo)
276 struct wakeup_event *ev;
277 cyg_tick_count_t sleep_time;
278 cyg_handle_t self = cyg_thread_self();
279 int old_splflags = 0; // no flags held
281 cyg_scheduler_lock();
283 // Safely find a free slot:
284 for (i = 0, ev = wakeup_list; i < CYGPKG_NET_NUM_WAKEUP_EVENTS; i++, ev++) {
290 CYG_ASSERT( i < CYGPKG_NET_NUM_WAKEUP_EVENTS, "no sleep slots" );
291 CYG_ASSERT( 1 == cyg_scheduler_read_lock(),
292 "Tsleep - called with scheduler locked" );
// No free slot: bail out rather than sleep forever.
294 if ( i >= CYGPKG_NET_NUM_WAKEUP_EVENTS ) {
295 cyg_scheduler_unlock();
299 // If we are the owner, then we must release the mutex when
// we sleep; record the held level bits so they can be restored below.
301 if ( self == splx_thread ) {
302 old_splflags = spl_state; // Keep them for restoration
303 CYG_ASSERT( spl_state, "spl_state not set" );
304 // Also want to assert that the mutex is locked...
305 CYG_ASSERT( splx_mutex.locked, "Splx mutex not locked" );
306 CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == self, "Splx mutex not mine" );
309 cyg_mutex_unlock( &splx_mutex );
312 // Re-initialize the semaphore - it might have counted up arbitrarily
313 // in the time between a prior sleeper being signalled and them
315 cyg_semaphore_init(&ev->sem, 0);
317 // This part actually does the wait:
318 // As of the new kernel, we can do this without unlocking the scheduler
// timo != 0: bounded wait; distinguish timeout from broken sleep by
// comparing the clock against the deadline afterwards.
320 sleep_time = cyg_current_time() + timo;
321 if (!cyg_semaphore_timed_wait(&ev->sem, sleep_time)) {
322 if( cyg_current_time() >= sleep_time )
// timo == 0: wait indefinitely; a false return means broken sleep.
328 if (!cyg_semaphore_wait(&ev->sem) ) {
333 ev->chan = 0; // Free the slot - the wakeup call cannot do this.
// Reclaim the splx lock and level bits we gave up before sleeping.
335 if ( old_splflags ) { // restore to previous state
336 // As of the new kernel, we can do this with the scheduler locked
337 cyg_mutex_lock( &splx_mutex ); // this might wait
338 CYG_ASSERT( 0 == splx_thread, "Splx thread set in tsleep" );
339 CYG_ASSERT( 0 == spl_state, "spl_state set in tsleep" );
340 splx_thread = self; // got it now...
341 spl_state = old_splflags;
344 cyg_scheduler_unlock();
350 // ------------------------------------------------------------------------
351 // DEBUGGING ROUTINES
352 #ifdef CYGIMPL_TRACE_SPLX
// Undo the tracing wrappers so the real kernel calls can be used inside
// the instrumented versions defined below.
353 #undef cyg_scheduler_lock
354 #undef cyg_scheduler_safe_lock
355 #undef cyg_scheduler_unlock
357 #define MAX_SCHED_EVENTS 256
// Fixed-size circular log of scheduler lock/unlock events, with a write
// cursor and a running total (used to detect wrap-around when dumping).
// NOTE(review): the struct members (fun, file, line, lock — per the
// diag_printf in show_sched_events) are not visible in this chunk.
358 static struct _sched_event {
361 } sched_event[MAX_SCHED_EVENTS];
362 static int next_sched_event = 0;
363 static int total_sched_events = 0;
// Append one event to the circular trace buffer, wrapping the cursor.
// NOTE(review): the return-type line, braces and the statements storing
// fun/file/line/lock into *se are not visible in this chunk.
366 do_sched_event(char *fun, char *file, int line, int lock)
368 struct _sched_event *se = &sched_event[next_sched_event];
369 if (++next_sched_event == MAX_SCHED_EVENTS) {
370 next_sched_event = 0;
376 total_sched_events++;
// Dump the recorded scheduler events, oldest first, via diag_printf().
// NOTE(review): the return-type line, braces, the declaration of 'i' and
// the branch body taken when the buffer has not yet wrapped are not
// visible in this chunk.
380 show_sched_events(void)
383 struct _sched_event *se;
384 if (total_sched_events < MAX_SCHED_EVENTS) {
// Buffer has wrapped: the oldest entry is one past the write cursor.
387 i = next_sched_event + 1;
388 if (i == MAX_SCHED_EVENTS) i = 0;
390 diag_printf("%d total scheduler events\n", total_sched_events);
// Walk the ring until we reach the write cursor again.
391 while (i != next_sched_event) {
392 se = &sched_event[i];
393 diag_printf("%s - lock: %d, called from %s.%d\n", se->fun, se->lock, se->file, se->line);
394 if (++i == MAX_SCHED_EVENTS) i = 0;
// Value logged with each trace event: the current scheduler lock depth.
398 #define SPLX_TRACE_DATA() cyg_scheduler_read_lock()
// Instrumented replacements for the kernel scheduler calls: perform the
// real operation, then record who called it and the resulting lock
// depth.  NOTE(review): the return-type lines and braces of these three
// wrappers are not visible in this chunk.
401 _cyg_scheduler_lock(char *file, int line)
403 cyg_scheduler_lock();
404 do_sched_event(__FUNCTION__, file, line, SPLX_TRACE_DATA());
408 _cyg_scheduler_safe_lock(char *file, int line)
410 cyg_scheduler_safe_lock();
411 do_sched_event(__FUNCTION__, file, line, SPLX_TRACE_DATA());
415 _cyg_scheduler_unlock(char *file, int line)
417 cyg_scheduler_unlock();
418 do_sched_event(__FUNCTION__, file, line, SPLX_TRACE_DATA());
420 #endif // CYGIMPL_TRACE_SPLX