5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 2005 - 2006 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
/* Forward declarations: scheduler task callbacks and static helpers that
   are referenced before their definitions later in this file. */
22 SILC_TASK_CALLBACK(silc_fsm_run);
23 SILC_TASK_CALLBACK(silc_fsm_finish);
24 SILC_TASK_CALLBACK(silc_fsm_sema_timedout);
25 SILC_TASK_CALLBACK(silc_fsm_start_real_thread);
26 static void *silc_fsm_thread(void *context);
27 static void silc_fsm_thread_termination_post(SilcFSMSema sema);
28 static void silc_fsm_sema_ref(SilcFSMSema sema);
29 static void silc_fsm_sema_unref(SilcFSMSema sema);
/* Allocate a new FSM machine context.  Zero-allocates the context and
   delegates field setup to silc_fsm_init.  NOTE(review): the alloc-failure
   check and the cleanup/return paths are not visible in this extraction —
   confirm against the full source. */
33 SilcFSM silc_fsm_alloc(void *fsm_context,
34 SilcFSMDestructor destructor,
35 void *destructor_context,
36 SilcSchedule schedule)
40 fsm = silc_calloc(1, sizeof(*fsm));
44 if (!silc_fsm_init(fsm, fsm_context, destructor,
45 destructor_context, schedule)) {
/* Initialize a pre-allocated FSM machine context.  Stores the caller's
   context, destructor and scheduler into the machine and clears the
   async-call flag; the state context starts out NULL.  Returns SilcBool
   (success path not visible in this extraction). */
55 SilcBool silc_fsm_init(SilcFSM fsm,
57 SilcFSMDestructor destructor,
58 void *destructor_context,
59 SilcSchedule schedule)
64 fsm->fsm_context = fsm_context;
65 fsm->state_context = NULL;
66 fsm->destructor = destructor;
67 fsm->destructor_context = destructor_context;
68 fsm->schedule = schedule;
70 fsm->async_call = FALSE;
77 /* Allocate FSM thread. Internally machine and thread use same context. */
/* Zero-allocates a thread context and delegates to silc_fsm_thread_init.
   NOTE(review): alloc-failure handling and the return are elided here. */
79 SilcFSMThread silc_fsm_thread_alloc(SilcFSM fsm,
81 SilcFSMThreadDestructor destructor,
82 void *destructor_context,
87 thread = silc_calloc(1, sizeof(*thread));
91 silc_fsm_thread_init(thread, fsm, thread_context, destructor,
92 destructor_context, real_thread);
96 /* Initialize FSM thread. Internally machine and thread use same context. */
/* Marks the context as a thread (thread->thread = TRUE), links it to its
   parent machine via u.t.fsm, and inherits the machine's scheduler. */
98 void silc_fsm_thread_init(SilcFSMThread thread,
100 void *thread_context,
101 SilcFSMThreadDestructor destructor,
102 void *destructor_context,
103 SilcBool real_thread)
105 SILC_LOG_DEBUG(("Initializing new thread %p (%s)",
106 thread, real_thread ? "real" : "FSM"));
108 #if defined(SILC_DEBUG)
/* A thread may only be created from a machine, not from another thread. */
109 assert(!fsm->thread);
110 #endif /* SILC_DEBUG */
112 thread->fsm_context = thread_context;
113 thread->state_context = NULL;
114 thread->destructor = (SilcFSMDestructor)destructor;
115 thread->destructor_context = destructor_context;
116 thread->schedule = fsm->schedule;
117 thread->thread = TRUE;
118 thread->async_call = FALSE;
119 thread->real_thread = real_thread;
120 thread->u.t.fsm = fsm;
125 /* Allocate lock for the machine if using real threads. */
126 if (real_thread && !fsm->u.m.lock)
127 if (!silc_mutex_alloc(&fsm->u.m.lock))
/* Mutex allocation failed: silently degrade to a normal (non-real)
   FSM thread instead of failing the init. */
128 thread->real_thread = FALSE;
131 /* FSM is destroyed through scheduler to make sure that all dying
132 real system threads will have their finish callbacks scheduled before
133 this one (when SILC_FSM_THREAD_WAIT was used). */
/* Final destructor task: frees machine lock / thread semaphore resources.
   Scheduled from silc_fsm_free with a short timeout (see below). */
135 SILC_TASK_CALLBACK(silc_fsm_free_final)
139 #if defined(SILC_DEBUG)
140 /* We must be finished */
143 /* Machine must not have active threads */
/* Deliberate idiom: the assert is only reached when threads != 0, i.e.
   it always fires if any thread is still alive at destruction time. */
144 if (!f->thread && f->u.m.threads)
145 assert(f->u.m.threads == 0);
146 #endif /* SILC_DEBUG */
/* Machine owns the real-thread mutex; threads own their wait semaphore. */
148 if (!f->thread && f->u.m.lock)
149 silc_mutex_free(f->u.m.lock);
151 if (f->thread && f->u.t.sema)
152 silc_fsm_sema_free(f->u.t.sema);
/* Public destructor: defers the actual freeing to the scheduler (1 usec
   timeout) so that pending finish callbacks run first — see the comment
   above silc_fsm_free_final. */
159 void silc_fsm_free(void *fsm)
162 silc_schedule_task_add_timeout(f->schedule, silc_fsm_free_final, f, 0, 1);
165 /* Task to start real thread. We start threads through scheduler, not
166 directly in silc_fsm_start. */
168 SILC_TASK_CALLBACK(silc_fsm_start_real_thread)
/* NOTE(review): the opening #ifdef SILC_THREADS for the #endif below is
   elided in this extraction — confirm against the full source. */
173 if (silc_thread_create(silc_fsm_thread, f, FALSE))
175 #endif /* SILC_THREADS */
177 SILC_LOG_DEBUG(("Could not create real thread, using normal FSM thread"));
179 /* Normal FSM operation */
/* Fall back gracefully: run as an ordinary FSM thread in this scheduler. */
180 f->real_thread = FALSE;
181 silc_fsm_continue_sync(f);
184 /* Start FSM in the specified state */
/* Asynchronous start: records the start state and schedules the first
   silc_fsm_run step (or the real-thread starter) with a 1 usec timeout,
   so execution begins from the scheduler, not from the caller's stack. */
186 void silc_fsm_start(void *fsm, SilcFSMStateCallback start_state)
190 SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
193 f->next_state = start_state;
194 f->synchronous = FALSE;
196 /* Start real thread through scheduler */
197 if (f->thread && f->real_thread) {
198 silc_schedule_task_add_timeout(f->schedule, silc_fsm_start_real_thread,
203 /* Normal FSM operation */
204 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 1);
207 /* Start FSM in the specified state synchronously */
/* Synchronous variant: invokes the first state directly from this call
   instead of going through a scheduled task. */
209 void silc_fsm_start_sync(void *fsm, SilcFSMStateCallback start_state)
213 SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
216 f->next_state = start_state;
217 f->synchronous = TRUE;
219 /* Start real thread directly */
220 if (f->thread && f->real_thread) {
221 silc_fsm_start_real_thread(f->schedule,
222 silc_schedule_get_context(f->schedule),
227 /* Normal FSM operation */
228 silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
231 /* Set next FSM state */
/* Only records the next state; the transition happens when the current
   state callback returns (see silc_fsm_run). */
233 void silc_fsm_next(void *fsm, SilcFSMStateCallback next_state)
236 f->next_state = next_state;
239 /* Continue after timeout */
/* Records the next state and schedules silc_fsm_run after the given
   timeout.  A 0/0 timeout is handled specially (branch body elided in
   this extraction — presumably an immediate return; confirm). */
241 void silc_fsm_next_later(void *fsm, SilcFSMStateCallback next_state,
242 SilcUInt32 seconds, SilcUInt32 useconds)
245 f->next_state = next_state;
246 if (!seconds && !useconds)
248 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f,
252 /* Continue after callback or async operation */
/* Resumes the machine asynchronously by scheduling the next run step. */
254 void silc_fsm_continue(void *fsm)
257 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 1);
260 /* Continue after callback or async operation immediately */
/* Resumes the machine right now, on the caller's stack. */
262 void silc_fsm_continue_sync(void *fsm)
265 silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
268 /* Return associated scheduler */
/* Accessor; body (return statement) elided in this extraction. */
270 SilcSchedule silc_fsm_get_schedule(void *fsm)
276 /* Return thread's machine */
/* Valid only for thread contexts (asserted); returns the parent machine. */
278 SilcFSM silc_fsm_get_machine(SilcFSMThread thread)
280 assert(thread->thread);
281 return (SilcFSM)thread->u.t.fsm;
/* Replace the user context delivered to every state callback. */
286 void silc_fsm_set_context(void *fsm, void *fsm_context)
289 f->fsm_context = fsm_context;
/* Return the user context set at init/alloc or via set_context. */
294 void *silc_fsm_get_context(void *fsm)
297 return f->fsm_context;
300 /* Set state context */
/* Per-state context, passed as the third argument to state callbacks. */
302 void silc_fsm_set_state_context(void *fsm, void *state_context)
305 f->state_context = state_context;
308 /* Get state context */
310 void *silc_fsm_get_state_context(void *fsm)
313 return f->state_context;
316 /* Wait for thread to terminate */
/* Allocates (on the thread's parent machine) a zero-valued semaphore used
   only for termination signalling, then waits on it.  The matching post
   happens in silc_fsm_thread_termination_post when the thread finishes. */
318 SilcBool silc_fsm_thread_wait(void *fsm, void *thread)
322 #if defined(SILC_DEBUG)
324 #endif /* SILC_DEBUG */
328 t->u.t.sema = silc_fsm_sema_alloc(t->u.t.fsm, 0);
332 SILC_LOG_DEBUG(("Waiting for thread %p to terminate", thread));
333 silc_fsm_sema_wait(t->u.t.sema, fsm);
/* The FSM engine: runs state callbacks in a loop while they return
   SILC_FSM_CONTINUE, then dispatches on the final status (switch arms
   are partially elided in this extraction). */
339 SILC_TASK_CALLBACK(silc_fsm_run)
341 SilcFSM fsm = context;
342 SilcFSMStatus status;
344 SILC_LOG_DEBUG(("Running %s %p", fsm->thread ? "thread" : "FSM", fsm));
/* Synchronous fast path: keep calling the next state until it yields. */
348 status = fsm->next_state(fsm, fsm->fsm_context, fsm->state_context);
349 while (status == SILC_FSM_CONTINUE);
353 /* Continue through scheduler */
354 silc_fsm_continue(fsm);
358 /* The machine is in hold */
359 SILC_LOG_DEBUG(("State wait %p", fsm));
360 fsm->synchronous = FALSE;
363 case SILC_FSM_FINISH:
364 /* Finish the state machine */
365 SILC_LOG_DEBUG(("State finish %p", fsm));
366 #if defined(SILC_DEBUG)
367 assert(!fsm->finished);
368 #endif /* SILC_DEBUG */
369 fsm->finished = TRUE;
371 /* If we are thread and using real threads, the FSM thread will finish
372 in the main thread, not in the created thread. */
/* Hand the finish callback to the main (application) scheduler, wake it,
   and stop this real thread's private scheduler. */
373 if (fsm->thread && fsm->real_thread) {
374 silc_schedule_task_add_timeout(app_context, silc_fsm_finish, fsm, 0, 1);
375 silc_schedule_wakeup(app_context);
376 silc_schedule_stop(fsm->schedule);
380 /* Normal FSM operation */
/* Synchronous machines finish inline; others via a scheduled task. */
381 if (fsm->synchronous)
382 silc_fsm_finish(fsm->schedule, app_context, 0, 0, fsm);
384 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_finish,
393 /* Finishes the FSM. This is always executed in the main thread, even
394 for FSM threads that were run in real threads. */
396 SILC_TASK_CALLBACK(silc_fsm_finish)
398 SilcFSM fsm = context;
400 SILC_LOG_DEBUG(("%s %p, is finished", fsm->thread ? "Thread" : "FSM", fsm));
402 fsm->next_state = NULL;
405 /* This is thread, send signal */
/* Wake any waiter blocked in silc_fsm_thread_wait, then drop the
   termination semaphore. */
407 silc_fsm_thread_termination_post(fsm->u.t.sema);
408 silc_fsm_sema_free(fsm->u.t.sema);
409 fsm->u.t.sema = NULL;
412 /* Remove the thread from machine */
/* NOTE(review): no locking visible around this decrement; presumably
   safe because finish always runs in the main thread (per the comment
   above) — confirm against the full source. */
413 fsm->u.t.fsm->u.m.threads--;
415 /* Call the destructor callback only if the underlaying machine is
/* i.e. skip the thread destructor if the parent machine already
   finished; the machine's own finish handles teardown then. */
417 if (fsm->destructor && fsm->u.t.fsm->finished == FALSE)
418 fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
/* Machine path: release the real-thread lock before destruction. */
422 silc_mutex_free(fsm->u.m.lock);
423 fsm->u.m.lock = NULL;
426 /* Call the destructor callback. */
428 fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
432 /* Allocate FSM semaphore */
/* Heap-allocates and initializes a semaphore; `allocated` marks it for
   freeing by silc_fsm_sema_free/unref.  Alloc-failure path elided. */
434 SilcFSMSema silc_fsm_sema_alloc(SilcFSM fsm, SilcUInt32 value)
438 sema = silc_calloc(1, sizeof(*sema));
442 silc_fsm_sema_init(sema, fsm, value);
443 sema->allocated = TRUE;
448 /* Initializes FSM semaphore */
/* Semaphores belong to machines, never to threads (asserted).  Clears the
   context and initializes the waiter list. */
450 void silc_fsm_sema_init(SilcFSMSema sema, SilcFSM fsm, SilcUInt32 value)
452 SILC_LOG_DEBUG(("Initializing semaphore %p", sema));
453 #if defined(SILC_DEBUG)
454 assert(!fsm->thread);
455 #endif /* SILC_DEBUG */
456 memset(sema, 0, sizeof(*sema));
459 silc_list_init(sema->waiters, struct SilcFSMObject, next);
/* Free a semaphore.  Refuses while references remain (refcnt > 0 branch
   body elided — presumably an early return); no waiter may be queued. */
465 void silc_fsm_sema_free(SilcFSMSema sema)
467 if (sema->refcnt > 0)
469 #if defined(SILC_DEBUG)
470 assert(silc_list_count(sema->waiters) == 0);
471 #endif /* SILC_DEBUG */
475 /* Reference semaphore */
/* Increment body elided in this extraction. */
477 static void silc_fsm_sema_ref(SilcFSMSema sema)
482 /* Unreference semaphore */
/* Drops a reference; frees the semaphore when the last reference goes
   away, but only if it was heap-allocated by silc_fsm_sema_alloc. */
484 static void silc_fsm_sema_unref(SilcFSMSema sema)
487 if (sema->refcnt == 0 && sema->allocated)
488 silc_fsm_sema_free(sema);
491 /* Wait until semaphore is non-zero. */
/* All semaphore state is protected by the owning machine's real-thread
   lock (u.m.lock).  If the value is zero the FSM is queued on the waiter
   list and will be resumed from silc_fsm_sema_post; return-value
   semantics are partially elided in this extraction. */
493 SilcUInt32 silc_fsm_sema_wait(SilcFSMSema sema, void *fsm)
495 SilcMutex lock = sema->fsm->u.m.lock;
497 silc_mutex_lock(lock);
500 #if defined(SILC_DEBUG)
/* Debug-only: this FSM must not already be queued on the waiter list. */
502 silc_list_start(sema->waiters);
503 while ((entry = silc_list_get(sema->waiters)) != SILC_LIST_END)
504 assert(entry != fsm);
505 #endif /* SILC_DEBUG */
507 SILC_LOG_DEBUG(("Waiting for semaphore %p", sema));
509 /* Add the FSM to waiter list */
510 silc_list_add(sema->waiters, fsm);
511 silc_mutex_unlock(lock);
515 SILC_LOG_DEBUG(("Acquired semaphore %p", sema));
517 /* It is possible that this FSM is in the list so remove it */
518 silc_list_del(sema->waiters, fsm);
520 silc_mutex_unlock(lock);
524 /* Wait util semaphore is non-zero, or timeout occurs. */
/* Like silc_fsm_sema_wait but also arms a silc_fsm_sema_timedout task.
   A previous timeout is reported (and the flag cleared) before waiting
   again; the timeout task is only added when the wait actually blocks. */
526 SilcUInt32 silc_fsm_sema_timedwait(SilcFSMSema sema, void *fsm,
527 SilcUInt32 seconds, SilcUInt32 useconds,
530 SilcMutex lock = sema->fsm->u.m.lock;
534 silc_mutex_lock(lock);
536 if (f->sema_timedout) {
537 SILC_LOG_DEBUG(("Semaphore was timedout"));
538 f->sema_timedout = FALSE;
541 silc_mutex_unlock(lock);
545 silc_mutex_unlock(lock);
547 value = silc_fsm_sema_wait(sema, fsm);
549 silc_schedule_task_add_timeout(f->schedule, silc_fsm_sema_timedout,
550 f, seconds, useconds);
560 /* Semaphore timedout */
/* Timeout task armed by silc_fsm_sema_timedwait: dequeues the waiter,
   marks the timeout and resumes the FSM so the timedwait caller can
   observe sema_timedout on its next pass. */
562 SILC_TASK_CALLBACK(silc_fsm_sema_timedout)
564 SilcFSM fsm = context;
565 SilcMutex lock = fsm->sema->fsm->u.m.lock;
567 SILC_LOG_DEBUG(("Semaphore %p timedout", fsm->sema));
569 /* Remove the waiter from the semaphore */
570 silc_mutex_lock(lock);
571 silc_list_del(fsm->sema->waiters, fsm);
575 silc_fsm_continue(fsm);
576 fsm->sema_timedout = TRUE;
580 silc_mutex_unlock(lock);
583 /* Signalled, semaphore */
/* Deferred delivery task scheduled by silc_fsm_sema_post: resumes the
   waiting FSM synchronously and drops the reference taken at post time.
   The post context `p` pairs the semaphore with the target FSM. */
585 SILC_TASK_CALLBACK(silc_fsm_signal)
587 SilcFSMSemaPost p = context;
588 SilcMutex lock = p->sema->fsm->u.m.lock;
590 /* If the semaphore value has gone to zero while we've been waiting this
591 callback, the semaphore has been signalled already. It can happen
592 when using real threads because the FSM may not be in waiting state when
593 the semaphore is posted. */
594 silc_mutex_lock(lock);
595 if (!p->sema->value) {
/* Already consumed: just release our reference and bail out. */
596 silc_mutex_unlock(lock);
597 silc_fsm_sema_unref(p->sema);
601 silc_mutex_unlock(lock);
603 SILC_LOG_DEBUG(("Signalled %s %p", p->fsm->thread ? "thread" : "FSM",
607 silc_fsm_continue_sync(p->fsm);
609 silc_fsm_sema_unref(p->sema);
613 /* Increase semaphore */
/* Post: for each queued waiter, cancel its pending timeout task, take a
   reference on the semaphore and schedule silc_fsm_signal on the waiter's
   scheduler, waking that scheduler in case the caller is a real thread. */
615 void silc_fsm_sema_post(SilcFSMSema sema)
619 SilcMutex lock = sema->fsm->u.m.lock;
621 SILC_LOG_DEBUG(("Posting semaphore %p", sema));
623 silc_mutex_lock(lock);
626 silc_list_start(sema->waiters);
627 while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
/* Cancel a pending silc_fsm_sema_timedwait timeout for this waiter. */
629 silc_schedule_task_del_by_all(fsm->schedule, 0, silc_fsm_sema_timedout,
/* Per-waiter post context; alloc-failure handling elided here. */
634 p = silc_calloc(1, sizeof(*p));
/* Keep the semaphore alive until silc_fsm_signal runs and unrefs it. */
639 silc_fsm_sema_ref(sema);
641 /* Signal through scheduler. Wake up destination scheduler in case
642 caller is a real thread. */
643 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_signal, p, 0, 1);
644 silc_schedule_wakeup(fsm->schedule);
647 silc_mutex_unlock(lock);
650 /* Post thread termination semaphore. Special function used only to
651 signal thread termination when SILC_FSM_THREAD_WAIT was used. */
/* Unlike silc_fsm_sema_post, waiters are resumed directly (no
   silc_fsm_signal task, no refcounting) and removed from the list here. */
653 static void silc_fsm_thread_termination_post(SilcFSMSema sema)
656 SilcMutex lock = sema->fsm->u.m.lock;
658 SILC_LOG_DEBUG(("Post thread terminate semaphore %p", sema));
660 silc_mutex_lock(lock);
662 silc_list_start(sema->waiters);
663 while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
664 /* Signal on thread termination. Wake up destination scheduler in case
665 caller is a real thread. */
666 silc_list_del(sema->waiters, fsm);
667 silc_fsm_continue(fsm);
668 silc_schedule_wakeup(fsm->schedule);
671 silc_mutex_unlock(lock);
676 static void *silc_fsm_thread(void *context)
678 SilcFSM fsm = context;
679 SilcSchedule old = fsm->schedule;
681 SILC_LOG_DEBUG(("Starting FSM thread in real thread"));
683 /* We allocate new SilcSchedule for the FSM, as the old SilcSchedule
684 cannot be used in this thread. Application may still use it if it
685 wants but we use our own. */
686 fsm->schedule = silc_schedule_init(0, old);
690 /* Start the FSM thread */
691 if (!silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_run, fsm, 0, 1))
694 /* Run the scheduler */
695 silc_schedule(fsm->schedule);
698 silc_schedule_uninit(fsm->schedule);