Author: Pekka Riikonen <priikone@silcnet.org>

Copyright (C) 2005 - 2006 Pekka Riikonen

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
/* Internal forward declarations.  SILC_TASK_CALLBACK presumably expands to
   the SILC scheduler's task-callback signature (defined in the scheduler
   headers -- TODO confirm). */
22 SILC_TASK_CALLBACK(silc_fsm_run);
23 SILC_TASK_CALLBACK(silc_fsm_finish);
24 SILC_TASK_CALLBACK(silc_fsm_sema_timedout);
25 SILC_TASK_CALLBACK(silc_fsm_start_real_thread);
/* Entry point run inside a real (OS-level) thread backing an FSM thread. */
26 static void *silc_fsm_thread(void *context);
/* Wakes waiters of a thread-termination semaphore (SILC_FSM_THREAD_WAIT). */
27 static void silc_fsm_thread_termination_post(SilcFSMSema sema);
/* Reference counting for semaphores signalled across schedulers. */
28 static void silc_fsm_sema_ref(SilcFSMSema sema);
29 static void silc_fsm_sema_unref(SilcFSMSema sema);
/* Allocate a new FSM machine and initialize it via silc_fsm_init().
   NOTE(review): this chunk elides lines of the body (the `fsm` declaration,
   allocation-failure handling, return and closing brace are not visible). */
33 SilcFSM silc_fsm_alloc(void *fsm_context,
34 SilcFSMDestructor destructor,
35 void *destructor_context,
36 SilcSchedule schedule)
/* Zero-initialized allocation; failure path not visible in this chunk. */
40 fsm = silc_calloc(1, sizeof(*fsm));
44 if (!silc_fsm_init(fsm, fsm_context, destructor,
45 destructor_context, schedule)) {
/* Initialize a (possibly pre-allocated) FSM machine context.  Stores the
   caller's context, destructor and scheduler into the machine.
   NOTE(review): chunk elides some lines (fsm_context parameter declaration,
   remaining field initializers, return value, closing brace). */
55 SilcBool silc_fsm_init(SilcFSM fsm,
57 SilcFSMDestructor destructor,
58 void *destructor_context,
59 SilcSchedule schedule)
64 fsm->fsm_context = fsm_context;
65 fsm->state_context = NULL;
66 fsm->destructor = destructor;
67 fsm->destructor_context = destructor_context;
68 fsm->schedule = schedule;
/* Not inside an async call until SILC_FSM_CALL (or similar) marks one. */
70 fsm->async_call = FALSE;
77 /* Allocate FSM thread. Internally machine and thread use same context. */
79 SilcFSMThread silc_fsm_thread_alloc(SilcFSM fsm,
81 SilcFSMThreadDestructor destructor,
82 void *destructor_context,
/* NOTE(review): thread_context/real_thread parameters and the
   allocation-failure check are elided in this chunk. */
87 thread = silc_calloc(1, sizeof(*thread));
/* Delegate all field setup to the common initializer below. */
91 silc_fsm_thread_init(thread, fsm, thread_context, destructor,
92 destructor_context, real_thread);
96 /* Initialize FSM thread. Internally machine and thread use same context. */
98 void silc_fsm_thread_init(SilcFSMThread thread,
100 void *thread_context,
101 SilcFSMThreadDestructor destructor,
102 void *destructor_context,
103 SilcBool real_thread)
105 SILC_LOG_DEBUG(("Initializing new thread %p (%s)",
106 thread, real_thread ? "real" : "FSM"));
/* A thread may only be created from a machine, never from another thread. */
108 #if defined(SILC_DEBUG)
109 assert(!fsm->thread);
110 #endif /* SILC_DEBUG */
112 thread->fsm_context = thread_context;
113 thread->state_context = NULL;
114 thread->destructor = (SilcFSMDestructor)destructor;
115 thread->destructor_context = destructor_context;
/* Thread shares the parent machine's scheduler (replaced later for real
   threads in silc_fsm_thread()). */
116 thread->schedule = fsm->schedule;
117 thread->thread = TRUE;
118 thread->async_call = FALSE;
119 thread->real_thread = real_thread;
/* Union: for threads u.t.fsm points back to the owning machine. */
120 thread->u.t.fsm = fsm;
125 /* Allocate lock for the machine if using real threads. */
126 if (real_thread && !fsm->u.m.lock)
127 if (!silc_mutex_alloc(&fsm->u.m.lock))
/* Graceful degradation: without a lock we fall back to an FSM thread. */
128 thread->real_thread = FALSE;
131 /* FSM is destroyed through scheduler to make sure that all dying
132 real system threads will have their finish callbacks scheduled before
133 this one (when SILC_FSM_THREAD_WAIT was used). */
/* Final destruction task; scheduled by silc_fsm_free() below. */
135 SILC_TASK_CALLBACK(silc_fsm_free_final)
139 #if defined(SILC_DEBUG)
140 /* We must be finished */
143 /* Machine must not have active threads */
/* The outer `if` guarantees threads != 0, so this assert deliberately
   fires (with the count in the message) whenever threads remain. */
144 if (!f->thread && f->u.m.threads)
145 assert(f->u.m.threads == 0);
146 #endif /* SILC_DEBUG */
/* Machines own the real-thread lock; threads own their wait semaphore. */
148 if (!f->thread && f->u.m.lock)
149 silc_mutex_free(f->u.m.lock);
151 if (f->thread && f->u.t.sema)
152 silc_fsm_sema_free(f->u.t.sema);
/* Public destructor: defers actual freeing to the scheduler so pending
   finish callbacks (see silc_fsm_free_final above) run first. */
159 void silc_fsm_free(void *fsm)
162 silc_schedule_task_add_timeout(f->schedule, silc_fsm_free_final, f, 0, 1);
165 /* Task to start real thread. We start threads through scheduler, not
166 directly in silc_fsm_start. */
168 SILC_TASK_CALLBACK(silc_fsm_start_real_thread)
/* NOTE(review): the matching #ifdef SILC_THREADS and the early return on
   successful thread creation are elided in this chunk. */
173 if (silc_thread_create(silc_fsm_thread, f, FALSE))
175 #endif /* SILC_THREADS */
177 SILC_LOG_DEBUG(("Could not create real thread, using normal FSM thread"));
179 /* Normal FSM operation */
/* Fallback: run as a plain FSM thread on the current scheduler. */
180 f->real_thread = FALSE;
181 silc_fsm_continue_sync(f);
184 /* Start FSM in the specified state */
186 void silc_fsm_start(void *fsm, SilcFSMStateCallback start_state)
190 SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
193 f->next_state = start_state;
/* Asynchronous start: first state runs from the scheduler, not here. */
194 f->synchronous = FALSE;
196 /* Start real thread through scheduler */
197 if (f->thread && f->real_thread) {
198 silc_schedule_task_add_timeout(f->schedule, silc_fsm_start_real_thread,
/* NOTE(review): task arguments / return between these calls are elided. */
203 /* Normal FSM operation */
204 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 1);
207 /* Start FSM in the specified state synchronously */
209 void silc_fsm_start_sync(void *fsm, SilcFSMStateCallback start_state)
213 SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
216 f->next_state = start_state;
/* Synchronous start: the first state executes before this call returns. */
217 f->synchronous = TRUE;
219 /* Start real thread directly */
220 if (f->thread && f->real_thread) {
/* Invoke the start task inline instead of scheduling it. */
221 silc_fsm_start_real_thread(f->schedule,
222 silc_schedule_get_context(f->schedule),
227 /* Normal FSM operation */
/* Run the state machine immediately on the caller's stack. */
228 silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
231 /* Set next FSM state */
233 void silc_fsm_next(void *fsm, SilcFSMStateCallback next_state)
/* Only records the state; it runs when the current state returns. */
236 f->next_state = next_state;
239 /* Continue after timeout */
241 void silc_fsm_next_later(void *fsm, SilcFSMStateCallback next_state,
242 SilcUInt32 seconds, SilcUInt32 useconds)
245 f->next_state = next_state;
/* Zero timeout: NOTE(review) the body of this branch is elided in this
   chunk (presumably an early return without scheduling). */
246 if (!seconds && !useconds)
248 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f,
252 /* Continue after callback or async operation */
254 void silc_fsm_continue(void *fsm)
/* Resume through the scheduler (safe to call from callbacks). */
257 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 1);
260 /* Continue after callback or async operation immediately */
262 void silc_fsm_continue_sync(void *fsm)
/* Resume inline on the caller's stack, bypassing the scheduler. */
265 silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
268 /* Return associated scheduler */
/* NOTE(review): body (return statement) elided in this chunk. */
270 SilcSchedule silc_fsm_get_schedule(void *fsm)
276 /* Return thread's machine */
278 SilcFSM silc_fsm_get_machine(SilcFSMThread thread)
/* Valid only for threads, never for machines. */
280 assert(thread->thread);
281 return (SilcFSM)thread->u.t.fsm;
/* Replace the user context passed to every state callback. */
286 void silc_fsm_set_context(void *fsm, void *fsm_context)
289 f->fsm_context = fsm_context;
/* Return the user context associated with the FSM. */
294 void *silc_fsm_get_context(void *fsm)
297 return f->fsm_context;
300 /* Set state context */
/* Per-state context, passed as the third argument to state callbacks. */
302 void silc_fsm_set_state_context(void *fsm, void *state_context)
305 f->state_context = state_context;
308 /* Get state context */
310 void *silc_fsm_get_state_context(void *fsm)
313 return f->state_context;
316 /* Wait for thread to terminate */
318 SilcBool silc_fsm_thread_wait(void *fsm, void *thread)
321 #if defined(SILC_DEBUG)
/* NOTE(review): the debug assertion body is elided in this chunk. */
323 #endif /* SILC_DEBUG */
/* Zero-valued semaphore owned by the machine; posted on thread finish
   (see silc_fsm_thread_termination_post). */
324 t->u.t.sema = silc_fsm_sema_alloc(t->u.t.fsm, 0);
327 silc_fsm_sema_wait(t->u.t.sema, fsm);
/* The FSM engine: executes state callbacks until one returns something
   other than SILC_FSM_CONTINUE, then dispatches on the final status.
   NOTE(review): this chunk elides the switch statement itself and several
   case labels; only fragments of the WAIT and FINISH handling are visible. */
333 SILC_TASK_CALLBACK(silc_fsm_run)
335 SilcFSM fsm = context;
336 SilcFSMStatus status;
338 SILC_LOG_DEBUG(("Running %s %p", fsm->thread ? "thread" : "FSM", fsm));
/* Run states back-to-back while they return SILC_FSM_CONTINUE
   (do/while opening elided). */
342 status = fsm->next_state(fsm, fsm->fsm_context, fsm->state_context);
343 while (status == SILC_FSM_CONTINUE);
347 /* Continue through scheduler */
348 silc_fsm_continue(fsm);
352 /* The machine is in hold */
353 SILC_LOG_DEBUG(("State wait %p", fsm));
/* A waiting machine is no longer synchronous; resumption is async. */
354 fsm->synchronous = FALSE;
357 case SILC_FSM_FINISH:
358 /* Finish the state machine */
359 SILC_LOG_DEBUG(("State finish %p", fsm));
360 #if defined(SILC_DEBUG)
/* Double-finish is a programming error. */
361 assert(!fsm->finished);
362 #endif /* SILC_DEBUG */
363 fsm->finished = TRUE;
365 /* If we are thread and using real threads, the FSM thread will finish
366 in the main thread, not in the created thread. */
367 if (fsm->thread && fsm->real_thread) {
/* Hand the finish task to the main scheduler, wake it, and stop this
   thread's private scheduler so the real thread can exit. */
368 silc_schedule_task_add_timeout(app_context, silc_fsm_finish, fsm, 0, 1);
369 silc_schedule_wakeup(app_context);
370 silc_schedule_stop(fsm->schedule);
374 /* Normal FSM operation */
/* Synchronous machines finish inline; otherwise via the scheduler. */
375 if (fsm->synchronous)
376 silc_fsm_finish(fsm->schedule, app_context, 0, 0, fsm);
378 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_finish,
387 /* Finishes the FSM. This is always executed in the main thread, even
388 for FSM threads that were run in real threads. */
390 SILC_TASK_CALLBACK(silc_fsm_finish)
392 SilcFSM fsm = context;
394 SILC_LOG_DEBUG(("%s %p, is finished", fsm->thread ? "Thread" : "FSM", fsm));
396 fsm->next_state = NULL;
/* NOTE(review): the `if (fsm->thread)` / else structure around the
   following fragments is elided in this chunk. */
399 /* This is thread, send signal */
/* Wake anyone blocked in silc_fsm_thread_wait() on this thread. */
401 silc_fsm_thread_termination_post(fsm->u.t.sema);
402 silc_fsm_sema_free(fsm->u.t.sema);
403 fsm->u.t.sema = NULL;
406 /* Remove the thread from machine */
407 fsm->u.t.fsm->u.m.threads--;
409 /* Call the destructor callback only if the underlaying machine is
/* ...not already finished (comment continuation elided). */
411 if (fsm->destructor && fsm->u.t.fsm->finished == FALSE)
412 fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
/* Machine path: release the real-thread lock, then call destructor. */
416 silc_mutex_free(fsm->u.m.lock);
417 fsm->u.m.lock = NULL;
420 /* Call the destructor callback. */
422 fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
426 /* Allocate FSM semaphore */
428 SilcFSMSema silc_fsm_sema_alloc(SilcFSM fsm, SilcUInt32 value)
432 sema = silc_calloc(1, sizeof(*sema));
436 silc_fsm_sema_init(sema, fsm, value);
/* Mark as heap-allocated so silc_fsm_sema_unref() can free it. */
437 sema->allocated = TRUE;
442 /* Initializes FSM semaphore */
444 void silc_fsm_sema_init(SilcFSMSema sema, SilcFSM fsm, SilcUInt32 value)
446 SILC_LOG_DEBUG(("Initializing semaphore %p", sema));
/* Semaphores belong to machines, never to threads. */
447 #if defined(SILC_DEBUG)
448 assert(!fsm->thread);
449 #endif /* SILC_DEBUG */
450 memset(sema, 0, sizeof(*sema));
/* Intrusive list of FSMs blocked on this semaphore. */
453 silc_list_init(sema->waiters, struct SilcFSMObject, next);
/* Free an allocated semaphore.  Deferred while references remain
   (NOTE(review): the early-return body after the refcnt check is elided). */
459 void silc_fsm_sema_free(SilcFSMSema sema)
461 if (sema->refcnt > 0)
/* Freeing with waiters still queued is a programming error. */
463 #if defined(SILC_DEBUG)
464 assert(silc_list_count(sema->waiters) == 0);
465 #endif /* SILC_DEBUG */
469 /* Reference semaphore */
/* NOTE(review): body (refcnt increment) elided in this chunk. */
471 static void silc_fsm_sema_ref(SilcFSMSema sema)
476 /* Unreference semaphore */
478 static void silc_fsm_sema_unref(SilcFSMSema sema)
/* Last reference of a heap-allocated semaphore frees it. */
481 if (sema->refcnt == 0 && sema->allocated)
482 silc_fsm_sema_free(sema);
485 /* Wait until semaphore is non-zero. */
487 SilcUInt32 silc_fsm_sema_wait(SilcFSMSema sema, void *fsm)
/* The owning machine's lock protects value and the waiter list. */
489 SilcMutex lock = sema->fsm->u.m.lock;
491 silc_mutex_lock(lock);
/* Debug: the same FSM must not already be queued as a waiter. */
494 #if defined(SILC_DEBUG)
496 silc_list_start(sema->waiters);
497 while ((entry = silc_list_get(sema->waiters)) != SILC_LIST_END)
498 assert(entry != fsm);
499 #endif /* SILC_DEBUG */
501 SILC_LOG_DEBUG(("Waiting for semaphore %p", sema));
503 /* Add the FSM to waiter list */
504 silc_list_add(sema->waiters, fsm);
505 silc_mutex_unlock(lock);
/* NOTE(review): the non-zero path (value decrement, return) is elided;
   these lines appear to be the "acquired without blocking" branch. */
509 SILC_LOG_DEBUG(("Acquired semaphore %p", sema));
511 /* It is possible that this FSM is in the list so remove it */
512 silc_list_del(sema->waiters, fsm);
514 silc_mutex_unlock(lock);
518 /* Wait util semaphore is non-zero, or timeout occurs. */
520 SilcUInt32 silc_fsm_sema_timedwait(SilcFSMSema sema, void *fsm,
521 SilcUInt32 seconds, SilcUInt32 useconds,
524 SilcMutex lock = sema->fsm->u.m.lock;
528 silc_mutex_lock(lock);
/* A previous wait on this FSM already timed out: consume the flag and
   report timeout instead of re-waiting. */
530 if (f->sema_timedout) {
531 SILC_LOG_DEBUG(("Semaphore was timedout"));
532 f->sema_timedout = FALSE;
535 silc_mutex_unlock(lock);
539 silc_mutex_unlock(lock);
/* Queue as a normal waiter, then arm the timeout task. */
541 value = silc_fsm_sema_wait(sema, fsm);
543 silc_schedule_task_add_timeout(f->schedule, silc_fsm_sema_timedout,
544 f, seconds, useconds);
554 /* Semaphore timedout */
556 SILC_TASK_CALLBACK(silc_fsm_sema_timedout)
558 SilcFSM fsm = context;
559 SilcMutex lock = fsm->sema->fsm->u.m.lock;
561 SILC_LOG_DEBUG(("Semaphore %p timedout", fsm->sema));
563 /* Remove the waiter from the semaphore */
564 silc_mutex_lock(lock);
565 silc_list_del(fsm->sema->waiters, fsm);
/* Resume the FSM; the flag makes the pending timedwait report timeout. */
569 silc_fsm_continue(fsm);
570 fsm->sema_timedout = TRUE;
574 silc_mutex_unlock(lock);
577 /* Signalled, semaphore */
/* Scheduler task delivering a semaphore post to one waiter; context is a
   SilcFSMSemaPost carrying the semaphore and the target FSM. */
579 SILC_TASK_CALLBACK(silc_fsm_signal)
581 SilcFSMSemaPost p = context;
582 SilcMutex lock = p->sema->fsm->u.m.lock;
584 /* If the semaphore value has went to zero while we've been waiting this
585 callback, sempahore has been been signalled already. It can happen
586 when using real threads because the FSM may not be waiting state when
587 the sempahore is posted. */
588 silc_mutex_lock(lock);
589 if (!p->sema->value) {
/* Stale post: drop our reference and bail (return elided in chunk). */
590 silc_mutex_unlock(lock);
591 silc_fsm_sema_unref(p->sema);
595 silc_mutex_unlock(lock);
597 SILC_LOG_DEBUG(("Signalled %s %p", p->fsm->thread ? "thread" : "FSM",
/* Resume the waiter inline; it re-enters silc_fsm_sema_wait and acquires. */
601 silc_fsm_continue_sync(p->fsm);
/* Balance the reference taken in silc_fsm_sema_post(). */
603 silc_fsm_sema_unref(p->sema);
607 /* Increase semaphore */
609 void silc_fsm_sema_post(SilcFSMSema sema)
613 SilcMutex lock = sema->fsm->u.m.lock;
615 SILC_LOG_DEBUG(("Posting semaphore %p", sema));
617 silc_mutex_lock(lock);
/* Wake every queued waiter (value increment elided in this chunk). */
620 silc_list_start(sema->waiters);
621 while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
/* Cancel any pending timeout task armed by silc_fsm_sema_timedwait. */
623 silc_schedule_task_del_by_all(fsm->schedule, 0, silc_fsm_sema_timedout,
/* Per-waiter post context; allocation-failure path elided. */
628 p = silc_calloc(1, sizeof(*p));
/* Keep the semaphore alive until silc_fsm_signal consumes the post. */
633 silc_fsm_sema_ref(sema);
635 /* Signal through scheduler. Wake up destination scheduler in case
636 caller is a real thread. */
637 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_signal, p, 0, 1);
638 silc_schedule_wakeup(fsm->schedule);
641 silc_mutex_unlock(lock);
644 /* Post thread termination semaphore. Special function used only to
645 signal thread termination when SILC_FSM_THREAD_WAIT was used. */
647 static void silc_fsm_thread_termination_post(SilcFSMSema sema)
650 SilcMutex lock = sema->fsm->u.m.lock;
652 SILC_LOG_DEBUG(("Post thread termination semaphore %p", sema));
654 silc_mutex_lock(lock);
656 silc_list_start(sema->waiters);
657 while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
658 /* Signal on thread termination. Wake up destination scheduler in case
659 caller is a real thread. */
/* Unlike silc_fsm_sema_post, waiters are dequeued and resumed directly
   via the scheduler -- no SilcFSMSemaPost indirection. */
660 silc_list_del(sema->waiters, fsm);
661 silc_fsm_continue(fsm);
662 silc_schedule_wakeup(fsm->schedule);
665 silc_mutex_unlock(lock);
/* Real-thread entry point: gives the FSM its own scheduler, runs it until
   silc_schedule_stop() (called from silc_fsm_run on FINISH), then tears the
   scheduler down.  NOTE(review): failure branches and return are elided. */
670 static void *silc_fsm_thread(void *context)
672 SilcFSM fsm = context;
673 SilcSchedule old = fsm->schedule;
675 SILC_LOG_DEBUG(("Starting FSM thread in real thread"));
677 /* We allocate new SilcSchedule for the FSM, as the old SilcSchedule
678 cannot be used in this thread. Application may still use it if it
679 wants but we use our own. */
680 fsm->schedule = silc_schedule_init(0, old);
684 /* Start the FSM thread */
685 if (!silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_run, fsm, 0, 1))
688 /* Run the scheduler */
/* Blocks until the thread's scheduler is stopped. */
689 silc_schedule(fsm->schedule);
692 silc_schedule_uninit(fsm->schedule);