5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 2005 - 2006 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
/* Forward declarations: scheduler task callbacks (run, finish, semaphore
   timeout, real-thread start) and internal static helpers used below. */
22 SILC_TASK_CALLBACK(silc_fsm_run);
23 SILC_TASK_CALLBACK(silc_fsm_finish_fsm);
24 SILC_TASK_CALLBACK(silc_fsm_sema_timedout);
25 SILC_TASK_CALLBACK(silc_fsm_start_real_thread);
26 static void *silc_fsm_thread(void *context);
27 static void silc_fsm_thread_termination_post(SilcFSMSema sema);
28 static void silc_fsm_sema_ref(SilcFSMSema sema);
29 static void silc_fsm_sema_unref(SilcFSMSema sema);
/* Allocate a new FSM context.  Zero-allocates the machine and delegates
   field setup to silc_fsm_init.  The destructor (with destructor_context)
   is stored for invocation at machine destruction.
   NOTE(review): allocation-failure and return paths are not visible in
   this chunk — confirm against the full source. */
33 SilcFSM silc_fsm_alloc(void *fsm_context,
34 SilcFSMDestructor destructor,
35 void *destructor_context,
36 SilcSchedule schedule)
40   fsm = silc_calloc(1, sizeof(*fsm));
44   if (!silc_fsm_init(fsm, fsm_context, destructor,
45 		     destructor_context, schedule)) {
/* Initialize a pre-allocated FSM context.  Stores the caller's context,
   destructor and scheduler into the machine and clears per-run state
   (state_context, async_call).  Returns SilcBool; the success/failure
   condition is not visible in this chunk. */
55 SilcBool silc_fsm_init(SilcFSM fsm,
57 SilcFSMDestructor destructor,
58 void *destructor_context,
59 SilcSchedule schedule)
64 fsm->fsm_context = fsm_context;
65 fsm->state_context = NULL;
66 fsm->destructor = destructor;
67 fsm->destructor_context = destructor_context;
68 fsm->schedule = schedule;
70 fsm->async_call = FALSE;
/* Allocate FSM thread. Internally machine and thread use same context.
   Zero-allocates the thread and delegates setup to silc_fsm_thread_init;
   real_thread requests execution in an OS-level thread. */
80 SilcFSMThread silc_fsm_thread_alloc(SilcFSM fsm,
82 SilcFSMThreadDestructor destructor,
83 void *destructor_context,
88   thread = silc_calloc(1, sizeof(*thread));
92   silc_fsm_thread_init(thread, fsm, thread_context, destructor,
93 		       destructor_context, real_thread);
/* Initialize FSM thread. Internally machine and thread use same context.
   `fsm' must be a machine, not another thread (asserted below).  Sets the
   thread's scheduler to the machine's scheduler and links the thread to
   its machine via u.t.fsm.  If a real OS thread is requested, a mutex is
   lazily allocated on the machine; on mutex allocation failure the thread
   silently falls back to a normal (non-real) FSM thread. */
99 void silc_fsm_thread_init(SilcFSMThread thread,
101 void *thread_context,
102 SilcFSMThreadDestructor destructor,
103 void *destructor_context,
104 SilcBool real_thread)
106   SILC_LOG_DEBUG(("Initializing new thread %p (%s)",
107 		  thread, real_thread ? "real" : "FSM"));
109   SILC_ASSERT(!fsm->thread);
111   thread->fsm_context = thread_context;
112   thread->state_context = NULL;
113   thread->destructor = (SilcFSMDestructor)destructor;
114   thread->destructor_context = destructor_context;
115   thread->schedule = fsm->schedule;
116   thread->thread = TRUE;
117   thread->async_call = FALSE;
118   thread->started = FALSE;
119   thread->real_thread = real_thread;
120   thread->u.t.fsm = fsm;
125   /* Allocate lock for the machine if using real threads. */
126   if (real_thread && !fsm->u.m.lock)
127     if (!silc_mutex_alloc(&fsm->u.m.lock))
      /* Mutex allocation failed: degrade to a normal FSM thread. */
128       thread->real_thread = FALSE;
/* FSM is destroyed through scheduler to make sure that all dying
   real system threads will have their finish callbacks scheduled before
   this one (when SILC_FSM_THREAD_WAIT was used). */
135 SILC_TASK_CALLBACK(silc_fsm_free_final)
139 #if defined(SILC_DEBUG)
140   /* We must be finished */
141   SILC_ASSERT(f->finished);
143   /* Machine must not have active threads */
144   if (!f->thread && f->u.m.threads)
145     SILC_ASSERT(f->u.m.threads == 0);
146 #endif /* SILC_DEBUG */
  /* Release machine-owned lock (machines only) and the thread-termination
     semaphore (threads only) before the context itself is freed. */
148   if (!f->thread && f->u.m.lock)
149     silc_mutex_free(f->u.m.lock);
151   if (f->thread && f->u.t.sema)
152     silc_fsm_sema_free(f->u.t.sema);
/* Free the FSM.  Destruction is deferred through the scheduler (see
   silc_fsm_free_final above); the direct call appears to be a fallback
   path — NOTE(review): the surrounding condition is not visible here. */
163     silc_schedule_task_add_timeout(f->schedule, silc_fsm_free_final, f, 0, 0);
165   silc_fsm_free_final(f->schedule, silc_schedule_get_context(f->schedule),
/* Task to start real thread. We start threads through scheduler, not
   directly in silc_fsm_start. */
172 SILC_TASK_CALLBACK(silc_fsm_start_real_thread)
177   if (silc_thread_create(silc_fsm_thread, f, FALSE))
179 #endif /* SILC_THREADS */
  /* Thread creation failed (or SILC_THREADS disabled): degrade to a
     normal FSM thread and continue synchronously in this scheduler. */
181   SILC_LOG_DEBUG(("Could not create real thread, using normal FSM thread"));
183   /* Normal FSM operation */
184   f->real_thread = FALSE;
185   silc_fsm_continue_sync(f);
/* Start FSM in the specified state.  Asynchronous: execution begins
   through a zero-timeout scheduler task, not in this call.  Real threads
   are also started through the scheduler (silc_fsm_start_real_thread). */
190 void silc_fsm_start(void *fsm, SilcFSMStateCallback start_state)
194   SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
197   f->next_state = start_state;
198   f->synchronous = FALSE;
201   /* Start real thread through scheduler */
202   if (f->thread && f->real_thread) {
203     silc_schedule_task_add_timeout(f->schedule, silc_fsm_start_real_thread,
208   /* Normal FSM operation */
209   silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 0);
/* Start FSM in the specified state synchronously: the first state runs
   before this function returns (silc_fsm_run is called directly), and
   real threads are started directly instead of via a scheduler task. */
214 void silc_fsm_start_sync(void *fsm, SilcFSMStateCallback start_state)
218   SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
221   f->next_state = start_state;
222   f->synchronous = TRUE;
225   /* Start real thread directly */
226   if (f->thread && f->real_thread) {
227     silc_fsm_start_real_thread(f->schedule,
228 			       silc_schedule_get_context(f->schedule),
233   /* Normal FSM operation */
234   silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
/* Set next FSM state.  Only records the state callback; the state is
   executed when the machine next runs. */
239 void silc_fsm_next(void *fsm, SilcFSMStateCallback next_state)
242   f->next_state = next_state;
/* Continue after timeout: set the next state and schedule silc_fsm_run
   after `seconds'/`useconds'.  A zero timeout is handled specially (the
   branch body is not visible in this chunk).  next_later marks that a
   delayed continuation is pending so it can be cancelled later. */
247 void silc_fsm_next_later(void *fsm, SilcFSMStateCallback next_state,
248 			 SilcUInt32 seconds, SilcUInt32 useconds)
251   f->next_state = next_state;
252   if (!seconds && !useconds)
254   silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f,
256   f->next_later = TRUE;
/* Continue after callback or async operation.  Cancels any pending
   delayed continuation (silc_fsm_next_later), then re-schedules the
   machine through a zero-timeout task; if the task cannot be added the
   machine is run directly as a fallback. */
261 void silc_fsm_continue(void *fsm)
265   silc_schedule_task_del_by_all(f->schedule, 0, silc_fsm_run, f);
266   f->next_later = FALSE;
268   if (!silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 0))
269     silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
/* Continue after callback or async operation immediately: cancels any
   pending delayed continuation and runs the machine synchronously in
   this call instead of through the scheduler. */
274 void silc_fsm_continue_sync(void *fsm)
278   silc_schedule_task_del_by_all(f->schedule, 0, silc_fsm_run, f);
279   f->next_later = FALSE;
281   silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
/* Finish the FSM or FSM thread.  Must not already be finished.  Any
   pending run task and delayed continuation are cancelled first. */
286 void silc_fsm_finish(void *fsm)
290   SILC_ASSERT(!f->finished);
294   silc_schedule_task_del_by_all(f->schedule, 0, silc_fsm_run, f);
295   f->next_later = FALSE;
297   /* If we are thread and using real threads, the FSM thread will finish
298      after the real thread has finished, in the main thread. */
299   if (f->thread && f->real_thread) {
300     /* Stop the real thread's scheduler to finish the thread */
301     silc_schedule_stop(f->schedule);
302     silc_schedule_wakeup(f->schedule);
306   /* Normal FSM operation */
  /* Direct call vs. scheduled finish — the selecting condition is not
     visible in this chunk; both paths end in silc_fsm_finish_fsm. */
308   silc_fsm_finish_fsm(f->schedule, silc_schedule_get_context(f->schedule),
311   silc_schedule_task_add_timeout(f->schedule, silc_fsm_finish_fsm, f, 0, 0);
/* Return associated scheduler.  Works for both machines and threads. */
316 SilcSchedule silc_fsm_get_schedule(void *fsm)
/* Return thread's machine.  `thread' must actually be a thread
   (asserted); the machine is stored in u.t.fsm at thread init. */
324 SilcFSM silc_fsm_get_machine(SilcFSMThread thread)
326   SILC_ASSERT(thread->thread);
327   return (SilcFSM)thread->u.t.fsm;
/* Returns TRUE if FSM is started and not yet finished */
332 SilcBool silc_fsm_is_started(void *fsm)
336   if (f->started && !f->finished)
/* Set the user context delivered to every state callback. */
344 void silc_fsm_set_context(void *fsm, void *fsm_context)
347   f->fsm_context = fsm_context;
/* Get the user context set with silc_fsm_set_context (or at alloc). */
352 void *silc_fsm_get_context(void *fsm)
355   return f->fsm_context;
/* Set state context, a per-state user pointer passed to state callbacks. */
360 void silc_fsm_set_state_context(void *fsm, void *state_context)
363   f->state_context = state_context;
/* Get state context */
368 void *silc_fsm_get_state_context(void *fsm)
371   return f->state_context;
/* Wait for thread to terminate.  `fsm' (the waiter) blocks on the
   thread's termination semaphore, which is lazily allocated on the
   thread's machine.  The semaphore is posted from silc_fsm_finish_fsm
   via silc_fsm_thread_termination_post. */
376 SilcBool silc_fsm_thread_wait(void *fsm, void *thread)
380   SILC_ASSERT(t->thread);
384   t->u.t.sema = silc_fsm_sema_alloc(t->u.t.fsm, 0);
388   SILC_LOG_DEBUG(("Waiting for thread %p to terminate", thread));
389   silc_fsm_sema_wait(t->u.t.sema, fsm);
/* The core state executor, run as a scheduler task.  Executes the
   current next_state callback repeatedly while it returns
   SILC_FSM_CONTINUE, then dispatches on the final status: yield
   (re-schedule via silc_fsm_continue), wait (leave machine idle until
   continued), or finish (destroy via silc_fsm_finish).  The switch
   framing around the cases is not fully visible in this chunk. */
395 SILC_TASK_CALLBACK(silc_fsm_run)
397   SilcFSM fsm = context;
398   SilcFSMStatus status;
400   SILC_LOG_DEBUG(("Running %s %p", fsm->thread ? "thread" : "FSM", fsm));
  /* Run states synchronously for as long as they return CONTINUE. */
404     status = fsm->next_state(fsm, fsm->fsm_context, fsm->state_context);
405   while (status == SILC_FSM_CONTINUE);
409     /* Continue through scheduler */
410     silc_fsm_continue(fsm);
414     /* The machine is in hold */
415     SILC_LOG_DEBUG(("State wait %p", fsm));
416     fsm->synchronous = FALSE;
419   case SILC_FSM_FINISH:
420     /* Finish the state machine */
421     SILC_LOG_DEBUG(("State finish %p", fsm));
422     silc_fsm_finish(fsm);
/* Finishes the FSM. This is always executed in the main thread, even
   for FSM threads that were run in real threads. */
433 SILC_TASK_CALLBACK(silc_fsm_finish_fsm)
435   SilcFSM fsm = context;
437   SILC_LOG_DEBUG(("%s %p, is finished", fsm->thread ? "Thread" : "FSM", fsm));
439   fsm->next_state = NULL;
442     /* This is thread, send signal */
  /* Wake any waiters that used SILC_FSM_THREAD_WAIT, then release the
     termination semaphore. */
444       silc_fsm_thread_termination_post(fsm->u.t.sema);
445       silc_fsm_sema_free(fsm->u.t.sema);
446       fsm->u.t.sema = NULL;
449     /* Remove the thread from machine */
450     fsm->u.t.fsm->u.m.threads--;
452     /* Call the destructor callback only if the underlaying machine is
    not finished (otherwise the machine's own destruction handles it). */
454     if (fsm->destructor && fsm->u.t.fsm->finished == FALSE)
455       fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
  /* Machine path: free the real-thread lock, if one was allocated. */
459       silc_mutex_free(fsm->u.m.lock);
460       fsm->u.m.lock = NULL;
463     /* Call the destructor callback. */
465       fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
/* Allocate FSM semaphore with initial `value'.  Marked allocated so
   silc_fsm_sema_unref can free it when the last reference drops. */
471 SilcFSMSema silc_fsm_sema_alloc(SilcFSM fsm, SilcUInt32 value)
475   sema = silc_calloc(1, sizeof(*sema));
479   silc_fsm_sema_init(sema, fsm, value);
480   sema->allocated = TRUE;
/* Initializes FSM semaphore.  `fsm' must be a machine, not a thread
   (asserted).  The waiter list holds FSMs blocked in sema_wait. */
487 void silc_fsm_sema_init(SilcFSMSema sema, SilcFSM fsm, SilcUInt32 value)
489   SILC_LOG_DEBUG(("Initializing semaphore %p", sema));
490   SILC_ASSERT(!fsm->thread);
491   memset(sema, 0, sizeof(*sema));
494   silc_list_init(sema->waiters, struct SilcFSMObject, next);
/* Free FSM semaphore.  Refuses (the early-out bodies are not visible
   here) while references are held or waiters remain on the list. */
500 void silc_fsm_sema_free(SilcFSMSema sema)
502   if (sema->refcnt > 0)
504   if (silc_list_count(sema->waiters) > 0)
/* Reference semaphore (increments refcnt; pairs with sema_unref). */
511 static void silc_fsm_sema_ref(SilcFSMSema sema)
/* Unreference semaphore; frees it when the last reference drops and
   the semaphore was heap-allocated by silc_fsm_sema_alloc. */
518 static void silc_fsm_sema_unref(SilcFSMSema sema)
521   if (sema->refcnt == 0 && sema->allocated)
522     silc_fsm_sema_free(sema);
/* Wait until semaphore is non-zero.  Uses the owning machine's lock
   (present when real threads are in use).  If the semaphore value is
   zero the calling FSM is appended to the waiter list and the function
   returns without acquiring; otherwise the semaphore is acquired and
   the FSM removed from the waiter list in case it was left there. */
527 SilcUInt32 silc_fsm_sema_wait(SilcFSMSema sema, void *fsm)
529   SilcMutex lock = sema->fsm->u.m.lock;
531   silc_mutex_lock(lock);
534 #if defined(SILC_DEBUG)
  /* Debug build: the same FSM must not already be waiting. */
536   silc_list_start(sema->waiters);
537   while ((entry = silc_list_get(sema->waiters)) != SILC_LIST_END)
538     SILC_ASSERT(entry != fsm);
539 #endif /* SILC_DEBUG */
541     SILC_LOG_DEBUG(("Waiting for semaphore %p", sema));
543     /* Add the FSM to waiter list */
544     silc_list_add(sema->waiters, fsm);
545     silc_mutex_unlock(lock);
549   SILC_LOG_DEBUG(("Acquired semaphore %p", sema));
551   /* It is possible that this FSM is in the list so remove it */
552   silc_list_del(sema->waiters, fsm);
554   silc_mutex_unlock(lock);
/* Wait until semaphore is non-zero, or timeout occurs.  If a previous
   timed wait already timed out, the flag is consumed and reported.
   Otherwise behaves like silc_fsm_sema_wait, additionally arming a
   scheduler timeout (silc_fsm_sema_timedout) when the wait blocks. */
560 SilcUInt32 silc_fsm_sema_timedwait(SilcFSMSema sema, void *fsm,
561 				   SilcUInt32 seconds, SilcUInt32 useconds,
564   SilcMutex lock = sema->fsm->u.m.lock;
568   silc_mutex_lock(lock);
570   if (f->sema_timedout) {
571     SILC_LOG_DEBUG(("Semaphore was timedout"));
572     f->sema_timedout = FALSE;
575     silc_mutex_unlock(lock);
579   silc_mutex_unlock(lock);
581   value = silc_fsm_sema_wait(sema, fsm);
  /* Wait blocked: arm the timeout that will remove us from the waiter
     list and continue the FSM with sema_timedout set. */
583     silc_schedule_task_add_timeout(f->schedule, silc_fsm_sema_timedout,
584 				   f, seconds, useconds);
/* Semaphore timedout.  Scheduler task fired when a timed wait expires:
   removes the waiter from the semaphore's waiter list, marks the FSM as
   timed out (seen by the next silc_fsm_sema_timedwait call) and
   continues the FSM. */
596 SILC_TASK_CALLBACK(silc_fsm_sema_timedout)
598   SilcFSM fsm = context;
599   SilcMutex lock = fsm->sema->fsm->u.m.lock;
601   SILC_LOG_DEBUG(("Semaphore %p timedout", fsm->sema));
603   /* Remove the waiter from the semaphore */
604   silc_mutex_lock(lock);
605   silc_list_del(fsm->sema->waiters, fsm);
609   silc_fsm_continue(fsm);
610   fsm->sema_timedout = TRUE;
614   silc_mutex_unlock(lock);
/* Signalled, semaphore.  Scheduler task delivering one semaphore post
   to one waiting FSM; `context' is the SilcFSMSemaPost created in
   silc_fsm_sema_post.  Drops the semaphore reference taken at post
   time in all exit paths. */
619 SILC_TASK_CALLBACK(silc_fsm_signal)
621   SilcFSMSemaPost p = context;
622   SilcMutex lock = p->sema->fsm->u.m.lock;
624   /* If the semaphore value has went to zero while we've been waiting this
625      callback, sempahore has been been signalled already. It can happen
626      when using real threads because the FSM may not be waiting state when
627      the sempahore is posted. */
628   silc_mutex_lock(lock);
629   if (!p->sema->value) {
630     silc_mutex_unlock(lock);
631     silc_fsm_sema_unref(p->sema);
635   silc_mutex_unlock(lock);
637   SILC_LOG_DEBUG(("Signalled %s %p", p->fsm->thread ? "thread" : "FSM",
  /* Run the signalled FSM immediately in this (main) thread. */
641   silc_fsm_continue_sync(p->fsm);
643   silc_fsm_sema_unref(p->sema);
/* Increase semaphore.  For every FSM on the waiter list: cancels any
   pending wait-timeout task, allocates a SilcFSMSemaPost, takes a
   semaphore reference, and schedules silc_fsm_signal on the waiter's
   own scheduler (waking it in case the poster is a real thread). */
649 void silc_fsm_sema_post(SilcFSMSema sema)
653   SilcMutex lock = sema->fsm->u.m.lock;
655   SILC_LOG_DEBUG(("Posting semaphore %p", sema));
657   silc_mutex_lock(lock);
660   silc_list_start(sema->waiters);
661   while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
  /* Cancel the waiter's timed-wait timeout, if armed. */
663       silc_schedule_task_del_by_all(fsm->schedule, 0, silc_fsm_sema_timedout,
668     p = silc_calloc(1, sizeof(*p));
673     silc_fsm_sema_ref(sema);
675     /* Signal through scheduler. Wake up destination scheduler in case
676        caller is a real thread. */
677     silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_signal, p, 0, 0);
678     silc_schedule_wakeup(fsm->schedule);
681   silc_mutex_unlock(lock);
/* Post thread termination semaphore. Special function used only to
   signal thread termination when SILC_FSM_THREAD_WAIT was used.
   Unlike silc_fsm_sema_post this removes each waiter from the list and
   continues it directly, without going through silc_fsm_signal. */
687 static void silc_fsm_thread_termination_post(SilcFSMSema sema)
690   SilcMutex lock = sema->fsm->u.m.lock;
692   SILC_LOG_DEBUG(("Post thread terminate semaphore %p", sema));
694   silc_mutex_lock(lock);
696   silc_list_start(sema->waiters);
697   while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
698     /* Signal on thread termination. Wake up destination scheduler in case
699        caller is a real thread. */
700     silc_list_del(sema->waiters, fsm);
701     silc_fsm_continue(fsm);
702     silc_schedule_wakeup(fsm->schedule);
705   silc_mutex_unlock(lock);
/* Entry point of a real OS thread running an FSM thread.  Allocates a
   private scheduler (child of the machine's scheduler), runs the FSM in
   it until the scheduler stops (silc_fsm_finish stops it), then
   uninitializes the private scheduler and hands the finish callback
   back to the parent/main scheduler.
   NOTE(review): fsm->schedule is restored to `old' somewhere in lines
   not visible in this chunk — the final task is added on the restored
   (main) scheduler; confirm against the full source. */
712 static void *silc_fsm_thread(void *context)
713   SilcFSM fsm = context;
714   SilcSchedule old = fsm->schedule;
715   SILC_LOG_DEBUG(("Starting FSM thread in real thread"));
717   /* We allocate new SilcSchedule for the FSM, as the old SilcSchedule
718      cannot be used in this thread. Application may still use it if it
719      wants but we use our own. */
720   fsm->schedule = silc_schedule_init(0, old);
724   /* Start the FSM thread */
725   if (!silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_run, fsm, 0, 0))
728   /* Run the scheduler */
729   silc_schedule(fsm->schedule);
732   silc_schedule_uninit(fsm->schedule);
736   /* Finish the FSM thread in the main thread */
737   SILC_ASSERT(fsm->finished);
738   silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_finish_fsm,
740   silc_schedule_wakeup(fsm->schedule);