5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 2005 - 2006 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
22 SILC_TASK_CALLBACK(silc_fsm_run);
23 SILC_TASK_CALLBACK(silc_fsm_finish);
24 SILC_TASK_CALLBACK(silc_fsm_sema_timedout);
25 SILC_TASK_CALLBACK(silc_fsm_start_real_thread);
26 static void *silc_fsm_thread(void *context);
27 static void silc_fsm_thread_termination_post(SilcFSMSema sema);
28 static void silc_fsm_sema_ref(SilcFSMSema sema);
29 static void silc_fsm_sema_unref(SilcFSMSema sema);
/* Allocate a new FSM machine context.  Zero-allocates the context and
   delegates field setup to silc_fsm_init().
   NOTE(review): this extract is missing lines — the calloc NULL check,
   the cleanup path on init failure, and the return are not visible. */
33 SilcFSM silc_fsm_alloc(void *fsm_context,
34 SilcFSMDestructor destructor,
35 void *destructor_context,
36 SilcSchedule schedule)
40 fsm = silc_calloc(1, sizeof(*fsm));
44 if (!silc_fsm_init(fsm, fsm_context, destructor,
45 destructor_context, schedule)) {
/* Initialize a pre-allocated FSM machine context (no allocation here).
   Stores the application context, destructor + its context, and the
   scheduler into `fsm`.
   NOTE(review): extract is missing lines — remaining field
   initialization and the SilcBool return path are not visible. */
55 SilcBool silc_fsm_init(SilcFSM fsm,
57 SilcFSMDestructor destructor,
58 void *destructor_context,
59 SilcSchedule schedule)
64 fsm->fsm_context = fsm_context;
65 fsm->state_context = NULL;
66 fsm->destructor = destructor;
67 fsm->destructor_context = destructor_context;
68 fsm->schedule = schedule;
70 fsm->async_call = FALSE; /* no async operation in progress yet */
/* Allocate FSM thread. Internally machine and thread use same context. */
/* NOTE(review): extract is missing lines — the calloc NULL check and
   the return of `thread` are not visible here. */
79 SilcFSMThread silc_fsm_thread_alloc(SilcFSM fsm,
81 SilcFSMThreadDestructor destructor,
82 void *destructor_context,
87 thread = silc_calloc(1, sizeof(*thread));
91 silc_fsm_thread_init(thread, fsm, thread_context, destructor,
92 destructor_context, real_thread);
/* Initialize FSM thread. Internally machine and thread use same context. */
/* Sets up a thread context that is bound to the parent machine `fsm`;
   `real_thread` selects a real system thread over a lightweight FSM
   thread.  NOTE(review): extract is missing lines between the visible
   statements. */
98 void silc_fsm_thread_init(SilcFSMThread thread,
100 void *thread_context,
101 SilcFSMThreadDestructor destructor,
102 void *destructor_context,
103 SilcBool real_thread)
105 SILC_LOG_DEBUG(("Initializing new thread %p (%s)",
106 thread, real_thread ? "real" : "FSM"));
108 #if defined(SILC_DEBUG)
109 SILC_ASSERT(!fsm->thread); /* parent must be a machine, not a thread */
110 #endif /* SILC_DEBUG */
112 thread->fsm_context = thread_context;
113 thread->state_context = NULL;
114 thread->destructor = (SilcFSMDestructor)destructor;
115 thread->destructor_context = destructor_context;
116 thread->schedule = fsm->schedule; /* share the machine's scheduler */
117 thread->thread = TRUE;
118 thread->async_call = FALSE;
119 thread->real_thread = real_thread;
120 thread->u.t.fsm = fsm; /* back-pointer to the owning machine */
125 /* Allocate lock for the machine if using real threads. */
126 if (real_thread && !fsm->u.m.lock)
127 if (!silc_mutex_alloc(&fsm->u.m.lock))
128 thread->real_thread = FALSE; /* fall back to FSM thread on failure */
/* FSM is destroyed through scheduler to make sure that all dying
   real system threads will have their finish callbacks scheduled before
   this one (when SILC_FSM_THREAD_WAIT was used). */
/* Final destructor task: releases the machine's mutex or a thread's
   termination semaphore.  NOTE(review): extract is missing lines —
   the actual silc_free of the context is not visible here. */
135 SILC_TASK_CALLBACK(silc_fsm_free_final)
139 #if defined(SILC_DEBUG)
140 /* We must be finished */
141 SILC_ASSERT(f->finished);
143 /* Machine must not have active threads */
144 if (!f->thread && f->u.m.threads)
145 SILC_ASSERT(f->u.m.threads == 0);
146 #endif /* SILC_DEBUG */
148 if (!f->thread && f->u.m.lock)
149 silc_mutex_free(f->u.m.lock);
151 if (f->thread && f->u.t.sema)
152 silc_fsm_sema_free(f->u.t.sema);
/* Free FSM context.  Destruction is deferred through a zero-second
   scheduler timeout (see the comment above silc_fsm_free_final). */
159 void silc_fsm_free(void *fsm)
162 silc_schedule_task_add_timeout(f->schedule, silc_fsm_free_final, f, 0, 1);
/* Task to start real thread. We start threads through scheduler, not
   directly in silc_fsm_start. */
/* If silc_thread_create succeeds (SILC_THREADS builds) the real thread
   takes over; otherwise fall back to a normal FSM thread and continue
   synchronously.  NOTE(review): extract is missing lines (e.g. the
   early return after a successful thread creation is not visible). */
168 SILC_TASK_CALLBACK(silc_fsm_start_real_thread)
173 if (silc_thread_create(silc_fsm_thread, f, FALSE))
175 #endif /* SILC_THREADS */
177 SILC_LOG_DEBUG(("Could not create real thread, using normal FSM thread"));
179 /* Normal FSM operation */
180 f->real_thread = FALSE;
181 silc_fsm_continue_sync(f);
/* Start FSM in the specified state */
/* Asynchronous start: the first state runs from a scheduler task, not
   from this call.  Real threads are also launched via the scheduler
   (see silc_fsm_start_real_thread). */
186 void silc_fsm_start(void *fsm, SilcFSMStateCallback start_state)
190 SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
193 f->next_state = start_state;
194 f->synchronous = FALSE;
196 /* Start real thread through scheduler */
197 if (f->thread && f->real_thread) {
198 silc_schedule_task_add_timeout(f->schedule, silc_fsm_start_real_thread,
203 /* Normal FSM operation */
204 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 1);
/* Start FSM in the specified state synchronously */
/* Same as silc_fsm_start but the first state executes immediately,
   inside this call, instead of via a scheduled task. */
209 void silc_fsm_start_sync(void *fsm, SilcFSMStateCallback start_state)
213 SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
216 f->next_state = start_state;
217 f->synchronous = TRUE;
219 /* Start real thread directly */
220 if (f->thread && f->real_thread) {
221 silc_fsm_start_real_thread(f->schedule,
222 silc_schedule_get_context(f->schedule),
227 /* Normal FSM operation */
228 silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
/* Set next FSM state */
/* Only records the state; the transition happens when the current
   state returns to the state machine loop. */
233 void silc_fsm_next(void *fsm, SilcFSMStateCallback next_state)
236 f->next_state = next_state;
/* Continue after timeout */
/* Sets the next state and schedules continuation after the given
   timeout.  NOTE(review): extract is missing lines — the behavior
   when both seconds and useconds are 0 is not fully visible. */
241 void silc_fsm_next_later(void *fsm, SilcFSMStateCallback next_state,
242 SilcUInt32 seconds, SilcUInt32 useconds)
245 f->next_state = next_state;
246 if (!seconds && !useconds)
248 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f,
/* Continue after callback or async operation */
/* Continuation is asynchronous: silc_fsm_run is queued as a
   zero-second scheduler task. */
254 void silc_fsm_continue(void *fsm)
257 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 1);
/* Continue after callback or async operation immediately */
/* Runs the state machine loop directly in the caller's context. */
262 void silc_fsm_continue_sync(void *fsm)
265 silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
/* Return associated scheduler */
/* NOTE(review): body not visible in this extract. */
270 SilcSchedule silc_fsm_get_schedule(void *fsm)
/* Return thread's machine */
/* Valid only for thread contexts; asserts that `thread` really is one. */
278 SilcFSM silc_fsm_get_machine(SilcFSMThread thread)
280 SILC_ASSERT(thread->thread);
281 return (SilcFSM)thread->u.t.fsm;
/* Set the FSM's application context (passed to every state callback). */
286 void silc_fsm_set_context(void *fsm, void *fsm_context)
289 f->fsm_context = fsm_context;
/* Return the FSM's application context. */
294 void *silc_fsm_get_context(void *fsm)
297 return f->fsm_context;
/* Set state context */
/* Per-state context, distinct from the machine-wide fsm_context. */
302 void silc_fsm_set_state_context(void *fsm, void *state_context)
305 f->state_context = state_context;
/* Get state context */
310 void *silc_fsm_get_state_context(void *fsm)
313 return f->state_context;
/* Wait for thread to terminate */
/* Allocates a termination semaphore (value 0) on the thread and waits
   on it from `fsm`; the semaphore is posted in silc_fsm_finish when the
   thread ends.  NOTE(review): extract is missing lines — the allocation
   failure check and SilcBool return are not visible. */
318 SilcBool silc_fsm_thread_wait(void *fsm, void *thread)
322 #if defined(SILC_DEBUG)
323 SILC_ASSERT(t->thread);
324 #endif /* SILC_DEBUG */
328 t->u.t.sema = silc_fsm_sema_alloc(t->u.t.fsm, 0);
332 SILC_LOG_DEBUG(("Waiting for thread %p to terminate", thread));
333 silc_fsm_sema_wait(t->u.t.sema, fsm);
/* The state machine loop.  Runs next_state callbacks back-to-back while
   they return SILC_FSM_CONTINUE, then dispatches on the final status:
   reschedule, hold (wait), or finish.
   NOTE(review): extract is missing lines — the switch statement header,
   several case labels, and the function tail are not visible; the
   comments below cover only the visible fragments. */
339 SILC_TASK_CALLBACK(silc_fsm_run)
341 SilcFSM fsm = context;
342 SilcFSMStatus status;
344 SILC_LOG_DEBUG(("Running %s %p", fsm->thread ? "thread" : "FSM", fsm));
/* Execute states synchronously for as long as they continue. */
348 status = fsm->next_state(fsm, fsm->fsm_context, fsm->state_context);
349 while (status == SILC_FSM_CONTINUE);
353 /* Continue through scheduler */
354 silc_fsm_continue(fsm);
358 /* The machine is in hold */
359 SILC_LOG_DEBUG(("State wait %p", fsm));
360 fsm->synchronous = FALSE;
363 case SILC_FSM_FINISH:
364 /* Finish the state machine */
365 SILC_LOG_DEBUG(("State finish %p", fsm));
366 #if defined(SILC_DEBUG)
367 SILC_ASSERT(!fsm->finished);
368 #endif /* SILC_DEBUG */
369 fsm->finished = TRUE;
371 /* If we are thread and using real threads, the FSM thread will finish
372 after the real thread has finished, in the main thread. */
373 if (fsm->thread && fsm->real_thread) {
374 silc_schedule_stop(fsm->schedule);
378 /* Normal FSM operation */
379 if (fsm->synchronous)
380 silc_fsm_finish(fsm->schedule, app_context, 0, 0, fsm);
382 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_finish,
/* Finishes the FSM. This is always executed in the main thread, even
   for FSM threads that were run in real threads. */
/* For a thread: posts the termination semaphore (if a waiter exists),
   detaches from the machine and calls the destructor.  For a machine:
   frees the machine lock and calls the destructor.
   NOTE(review): extract is missing lines — the if/else structure
   separating the thread and machine paths is not fully visible. */
394 SILC_TASK_CALLBACK(silc_fsm_finish)
396 SilcFSM fsm = context;
398 SILC_LOG_DEBUG(("%s %p, is finished", fsm->thread ? "Thread" : "FSM", fsm));
400 fsm->next_state = NULL;
403 /* This is thread, send signal */
405 silc_fsm_thread_termination_post(fsm->u.t.sema);
406 silc_fsm_sema_free(fsm->u.t.sema);
407 fsm->u.t.sema = NULL;
410 /* Remove the thread from machine */
411 fsm->u.t.fsm->u.m.threads--;
413 /* Call the destructor callback only if the underlaying machine is
415 if (fsm->destructor && fsm->u.t.fsm->finished == FALSE)
416 fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
420 silc_mutex_free(fsm->u.m.lock);
421 fsm->u.m.lock = NULL;
424 /* Call the destructor callback. */
426 fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
/* Allocate FSM semaphore */
/* Allocated semaphores are freed by silc_fsm_sema_free once
   unreferenced (see sema->allocated / refcnt).  NOTE(review): extract
   is missing lines — the calloc NULL check and return are not
   visible. */
432 SilcFSMSema silc_fsm_sema_alloc(SilcFSM fsm, SilcUInt32 value)
436 sema = silc_calloc(1, sizeof(*sema));
440 silc_fsm_sema_init(sema, fsm, value);
441 sema->allocated = TRUE;
/* Initializes FSM semaphore */
/* `fsm` must be a machine, not a thread.  Clears the context and
   initializes the waiter list.  NOTE(review): extract is missing lines
   — the assignments of sema->fsm and sema->value are not visible. */
448 void silc_fsm_sema_init(SilcFSMSema sema, SilcFSM fsm, SilcUInt32 value)
450 SILC_LOG_DEBUG(("Initializing semaphore %p", sema));
451 #if defined(SILC_DEBUG)
452 SILC_ASSERT(!fsm->thread);
453 #endif /* SILC_DEBUG */
454 memset(sema, 0, sizeof(*sema));
457 silc_list_init(sema->waiters, struct SilcFSMObject, next);
/* Free semaphore.  Refuses (presumably returns early — the bodies of
   these guards are not visible in this extract) while references or
   waiters remain. */
463 void silc_fsm_sema_free(SilcFSMSema sema)
465 if (sema->refcnt > 0)
467 if (silc_list_count(sema->waiters) > 0)
/* Reference semaphore */
/* NOTE(review): body (refcnt increment) not visible in this extract. */
474 static void silc_fsm_sema_ref(SilcFSMSema sema)
/* Unreference semaphore */
/* Frees the semaphore when the last reference drops and it was
   heap-allocated via silc_fsm_sema_alloc. */
481 static void silc_fsm_sema_unref(SilcFSMSema sema)
484 if (sema->refcnt == 0 && sema->allocated)
485 silc_fsm_sema_free(sema);
/* Wait until semaphore is non-zero. */
/* Uses the owning machine's lock for mutual exclusion.  If the value is
   zero the caller FSM is placed on the waiter list (and will be woken by
   silc_fsm_sema_post); otherwise the semaphore is acquired immediately.
   NOTE(review): extract is missing lines — the value check, the return
   values, and part of the branch structure are not visible. */
490 SilcUInt32 silc_fsm_sema_wait(SilcFSMSema sema, void *fsm)
492 SilcMutex lock = sema->fsm->u.m.lock;
494 silc_mutex_lock(lock);
497 #if defined(SILC_DEBUG)
/* Debug build: caller must not already be on the waiter list. */
499 silc_list_start(sema->waiters);
500 while ((entry = silc_list_get(sema->waiters)) != SILC_LIST_END)
501 SILC_ASSERT(entry != fsm);
502 #endif /* SILC_DEBUG */
504 SILC_LOG_DEBUG(("Waiting for semaphore %p", sema));
506 /* Add the FSM to waiter list */
507 silc_list_add(sema->waiters, fsm);
508 silc_mutex_unlock(lock);
512 SILC_LOG_DEBUG(("Acquired semaphore %p", sema));
514 /* It is possible that this FSM is in the list so remove it */
515 silc_list_del(sema->waiters, fsm);
517 silc_mutex_unlock(lock);
/* Wait util semaphore is non-zero, or timeout occurs. */
/* Like silc_fsm_sema_wait but also arms a scheduler timeout
   (silc_fsm_sema_timedout) that cancels the wait.  A previously fired
   timeout is reported via f->sema_timedout and cleared here.
   NOTE(review): extract is missing lines — the `timedout` output
   parameter handling and return paths are not visible. */
523 SilcUInt32 silc_fsm_sema_timedwait(SilcFSMSema sema, void *fsm,
524 SilcUInt32 seconds, SilcUInt32 useconds,
527 SilcMutex lock = sema->fsm->u.m.lock;
531 silc_mutex_lock(lock);
533 if (f->sema_timedout) {
534 SILC_LOG_DEBUG(("Semaphore was timedout"));
535 f->sema_timedout = FALSE; /* one-shot flag: consume it */
538 silc_mutex_unlock(lock);
542 silc_mutex_unlock(lock);
544 value = silc_fsm_sema_wait(sema, fsm);
546 silc_schedule_task_add_timeout(f->schedule, silc_fsm_sema_timedout,
547 f, seconds, useconds);
/* Semaphore timedout */
/* Timeout task armed by silc_fsm_sema_timedwait: removes the waiter
   from the semaphore, marks the wait as timed out and continues the
   FSM.  NOTE(review): extract is missing lines between the visible
   statements. */
559 SILC_TASK_CALLBACK(silc_fsm_sema_timedout)
561 SilcFSM fsm = context;
562 SilcMutex lock = fsm->sema->fsm->u.m.lock;
564 SILC_LOG_DEBUG(("Semaphore %p timedout", fsm->sema));
566 /* Remove the waiter from the semaphore */
567 silc_mutex_lock(lock);
568 silc_list_del(fsm->sema->waiters, fsm);
572 silc_fsm_continue(fsm);
573 fsm->sema_timedout = TRUE;
577 silc_mutex_unlock(lock);
/* Signalled, semaphore */
/* Scheduler task queued by silc_fsm_sema_post for each waiter.  The
   context `p` pairs the waiter FSM with the semaphore; the reference
   taken at post time is released here.
   NOTE(review): extract is missing lines — the early return after the
   zero-value check and the freeing of `p` are not visible. */
582 SILC_TASK_CALLBACK(silc_fsm_signal)
584 SilcFSMSemaPost p = context;
585 SilcMutex lock = p->sema->fsm->u.m.lock;
587 /* If the semaphore value has went to zero while we've been waiting this
588 callback, sempahore has been been signalled already. It can happen
589 when using real threads because the FSM may not be waiting state when
590 the sempahore is posted. */
591 silc_mutex_lock(lock);
592 if (!p->sema->value) {
593 silc_mutex_unlock(lock);
594 silc_fsm_sema_unref(p->sema);
598 silc_mutex_unlock(lock);
600 SILC_LOG_DEBUG(("Signalled %s %p", p->fsm->thread ? "thread" : "FSM",
604 silc_fsm_continue_sync(p->fsm);
606 silc_fsm_sema_unref(p->sema);
/* Increase semaphore */
/* For every FSM on the waiter list: cancel any pending timeout task,
   take a semaphore reference, and queue silc_fsm_signal on the waiter's
   scheduler (waking it in case the poster is a real thread).
   NOTE(review): extract is missing lines — the value increment, the
   SilcFSMSemaPost field assignments and the calloc NULL check are not
   visible. */
612 void silc_fsm_sema_post(SilcFSMSema sema)
616 SilcMutex lock = sema->fsm->u.m.lock;
618 SILC_LOG_DEBUG(("Posting semaphore %p", sema));
620 silc_mutex_lock(lock);
623 silc_list_start(sema->waiters);
624 while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
626 silc_schedule_task_del_by_all(fsm->schedule, 0, silc_fsm_sema_timedout,
631 p = silc_calloc(1, sizeof(*p));
636 silc_fsm_sema_ref(sema); /* released in silc_fsm_signal */
638 /* Signal through scheduler. Wake up destination scheduler in case
639 caller is a real thread. */
640 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_signal, p, 0, 1);
641 silc_schedule_wakeup(fsm->schedule);
644 silc_mutex_unlock(lock);
/* Post thread termination semaphore. Special function used only to
   signal thread termination when SILC_FSM_THREAD_WAIT was used. */
/* Unlike silc_fsm_sema_post, waiters are removed and continued directly
   here rather than through a silc_fsm_signal task. */
650 static void silc_fsm_thread_termination_post(SilcFSMSema sema)
653 SilcMutex lock = sema->fsm->u.m.lock;
655 SILC_LOG_DEBUG(("Post thread terminate semaphore %p", sema));
657 silc_mutex_lock(lock);
659 silc_list_start(sema->waiters);
660 while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
661 /* Signal on thread termination. Wake up destination scheduler in case
662 caller is a real thread. */
663 silc_list_del(sema->waiters, fsm);
664 silc_fsm_continue(fsm);
665 silc_schedule_wakeup(fsm->schedule);
668 silc_mutex_unlock(lock);
673 static void *silc_fsm_thread(void *context)
675 SilcFSM fsm = context;
676 SilcSchedule old = fsm->schedule;
678 SILC_LOG_DEBUG(("Starting FSM thread in real thread"));
680 /* We allocate new SilcSchedule for the FSM, as the old SilcSchedule
681 cannot be used in this thread. Application may still use it if it
682 wants but we use our own. */
683 fsm->schedule = silc_schedule_init(0, old);
687 /* Start the FSM thread */
688 if (!silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_run, fsm, 0, 1))
691 /* Run the scheduler */
692 silc_schedule(fsm->schedule);
695 silc_schedule_uninit(fsm->schedule);
699 /* Finish the FSM thread in the main thread */
700 SILC_ASSERT(fsm->finished);
701 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_finish, fsm, 0, 1);
702 silc_schedule_wakeup(fsm->schedule);