5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 2005 - 2006 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
22 SILC_TASK_CALLBACK(silc_fsm_run);
23 SILC_TASK_CALLBACK(silc_fsm_finish);
24 SILC_TASK_CALLBACK(silc_fsm_sema_timedout);
25 SILC_TASK_CALLBACK(silc_fsm_start_real_thread);
26 static void *silc_fsm_thread(void *context);
27 static void silc_fsm_thread_termination_post(SilcFSMSema sema);
28 static void silc_fsm_sema_ref(SilcFSMSema sema);
29 static void silc_fsm_sema_unref(SilcFSMSema sema);
/* Allocate a new FSM machine context.  Zero-initializes the structure via
   silc_calloc and delegates all field setup to silc_fsm_init below.
   NOTE(review): this extract is missing interior source lines (the `fsm`
   declaration, allocation-failure check, cleanup and return) — do not
   treat the visible lines as the complete function. */
33 SilcFSM silc_fsm_alloc(void *fsm_context,
34 SilcFSMDestructor destructor,
35 void *destructor_context,
36 SilcSchedule schedule)
40 fsm = silc_calloc(1, sizeof(*fsm));
/* Initialization failure path; body of this branch elided in extract. */
44 if (!silc_fsm_init(fsm, fsm_context, destructor,
45 destructor_context, schedule)) {
/* Initialize a caller-provided FSM context (used by silc_fsm_alloc and by
   callers with pre-allocated machines).  Stores the user context, the
   destructor + its context, and the scheduler; clears the per-state
   context and the async-call flag.
   NOTE(review): missing lines here likely set further fields and the
   return value — confirm against the full source. */
55 SilcBool silc_fsm_init(SilcFSM fsm,
57 SilcFSMDestructor destructor,
58 void *destructor_context,
59 SilcSchedule schedule)
64 fsm->fsm_context = fsm_context;
65 fsm->state_context = NULL;
66 fsm->destructor = destructor;
67 fsm->destructor_context = destructor_context;
68 fsm->schedule = schedule;
70 fsm->async_call = FALSE;
77 /* Allocate FSM thread. Internally machine and thread use same context. */
/* NOTE(review): extract elides the `thread` declaration, the
   allocation-failure check and the return of the new thread. */
79 SilcFSMThread silc_fsm_thread_alloc(SilcFSM fsm,
81 SilcFSMThreadDestructor destructor,
82 void *destructor_context,
87 thread = silc_calloc(1, sizeof(*thread));
/* Field setup is shared with caller-allocated threads. */
91 silc_fsm_thread_init(thread, fsm, thread_context, destructor,
92 destructor_context, real_thread);
96 /* Initialize FSM thread. Internally machine and thread use same context. */
98 void silc_fsm_thread_init(SilcFSMThread thread,
100 void *thread_context,
101 SilcFSMThreadDestructor destructor,
102 void *destructor_context,
103 SilcBool real_thread)
105 SILC_LOG_DEBUG(("Initializing new thread %p (%s)",
106 thread, real_thread ? "real" : "FSM"));
/* `fsm` must be a machine, not another thread: threads cannot nest. */
108 SILC_ASSERT(!fsm->thread);
110 thread->fsm_context = thread_context;
111 thread->state_context = NULL;
/* Thread destructor is stored through the machine-destructor type;
   both callback signatures are treated as compatible here. */
112 thread->destructor = (SilcFSMDestructor)destructor;
113 thread->destructor_context = destructor_context;
/* The thread shares its parent machine's scheduler by default. */
114 thread->schedule = fsm->schedule;
115 thread->thread = TRUE;
116 thread->async_call = FALSE;
117 thread->real_thread = real_thread;
/* Back-pointer to the owning machine (the u.t arm of the union). */
118 thread->u.t.fsm = fsm;
123 /* Allocate lock for the machine if using real threads. */
/* If the mutex cannot be allocated, silently degrade to a normal
   (non-OS-thread) FSM thread instead of failing the init. */
124 if (real_thread && !fsm->u.m.lock)
125 if (!silc_mutex_alloc(&fsm->u.m.lock))
126 thread->real_thread = FALSE;
129 /* FSM is destroyed through scheduler to make sure that all dying
130 real system threads will have their finish callbacks scheduled before
131 this one (when SILC_FSM_THREAD_WAIT was used). */
/* Final destruction task scheduled by silc_fsm_free.  Releases the
   machine lock or the thread's termination semaphore as appropriate.
   NOTE(review): the `f` local and the trailing silc_free of the context
   are elided from this extract. */
133 SILC_TASK_CALLBACK(silc_fsm_free_final)
137 #if defined(SILC_DEBUG)
138 /* We must be finished */
139 SILC_ASSERT(f->finished);
141 /* Machine must not have active threads */
/* Guarded assert idiom: the assert is only reached when threads != 0,
   so reaching it guarantees the assertion fires (reports the leak);
   the whole check compiles away outside SILC_DEBUG. */
142 if (!f->thread && f->u.m.threads)
143 SILC_ASSERT(f->u.m.threads == 0);
144 #endif /* SILC_DEBUG */
/* Machines own the real-thread lock... */
146 if (!f->thread && f->u.m.lock)
147 silc_mutex_free(f->u.m.lock);
/* ...threads own the termination semaphore (from SILC_FSM_THREAD_WAIT). */
149 if (f->thread && f->u.t.sema)
150 silc_fsm_sema_free(f->u.t.sema);
/* Public destructor.  Does not free synchronously: queues
   silc_fsm_free_final as a 1-usec timeout task so that finish callbacks
   of dying real threads run first (see comment above free_final). */
157 void silc_fsm_free(void *fsm)
160 silc_schedule_task_add_timeout(f->schedule, silc_fsm_free_final, f, 0, 1);
163 /* Task to start real thread. We start threads through scheduler, not
164 directly in silc_fsm_start. */
/* Tries to spawn an OS thread running silc_fsm_thread; on failure (or
   when SILC_THREADS is not compiled in) falls back to a normal FSM
   thread on the current scheduler.  NOTE(review): the matching
   `#ifdef SILC_THREADS` opener and the early return after a successful
   silc_thread_create are elided from this extract. */
166 SILC_TASK_CALLBACK(silc_fsm_start_real_thread)
171 if (silc_thread_create(silc_fsm_thread, f, FALSE))
173 #endif /* SILC_THREADS */
175 SILC_LOG_DEBUG(("Could not create real thread, using normal FSM thread"));
177 /* Normal FSM operation */
178 f->real_thread = FALSE;
/* Run the first state immediately in this (main) thread. */
179 silc_fsm_continue_sync(f);
182 /* Start FSM in the specified state */
/* Asynchronous start: execution begins from the scheduler, not from
   inside this call.  Works for both machines and threads. */
184 void silc_fsm_start(void *fsm, SilcFSMStateCallback start_state)
188 SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
191 f->next_state = start_state;
192 f->synchronous = FALSE;
194 /* Start real thread through scheduler */
195 if (f->thread && f->real_thread) {
/* Thread creation is deferred to silc_fsm_start_real_thread so that it
   happens from the scheduler context (see its comment). */
196 silc_schedule_task_add_timeout(f->schedule, silc_fsm_start_real_thread,
201 /* Normal FSM operation */
/* 1-usec timeout task: runs on the next scheduler iteration. */
202 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 1);
205 /* Start FSM in the specified state synchronously */
/* Synchronous variant of silc_fsm_start: the first state (and real
   thread creation, if requested) executes before this call returns. */
207 void silc_fsm_start_sync(void *fsm, SilcFSMStateCallback start_state)
211 SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
214 f->next_state = start_state;
215 f->synchronous = TRUE;
217 /* Start real thread directly */
218 if (f->thread && f->real_thread) {
/* Invoke the task callback by hand instead of scheduling it. */
219 silc_fsm_start_real_thread(f->schedule,
220 silc_schedule_get_context(f->schedule),
225 /* Normal FSM operation */
/* Direct call into the state dispatcher; same calling convention the
   scheduler would use (type, fd arguments passed as 0, 0). */
226 silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
229 /* Set next FSM state */
/* Only records the transition; the state runs when the dispatcher
   (silc_fsm_run) next executes this machine/thread. */
231 void silc_fsm_next(void *fsm, SilcFSMStateCallback next_state)
234 f->next_state = next_state;
237 /* Continue after timeout */
/* Sets the next state and schedules silc_fsm_run after the given delay.
   NOTE(review): the body of the `if (!seconds && !useconds)` branch is
   elided in this extract — presumably an early return so a zero timeout
   schedules nothing; confirm against the full source. */
239 void silc_fsm_next_later(void *fsm, SilcFSMStateCallback next_state,
240 SilcUInt32 seconds, SilcUInt32 useconds)
243 f->next_state = next_state;
244 if (!seconds && !useconds)
246 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f,
250 /* Continue after callback or async operation */
/* Resumes a waiting machine asynchronously: queues the dispatcher as a
   1-usec timeout task on the machine's scheduler. */
252 void silc_fsm_continue(void *fsm)
255 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 1);
258 /* Continue after callback or async operation immediately */
/* Synchronous resume: calls the dispatcher directly instead of going
   through the scheduler queue. */
260 void silc_fsm_continue_sync(void *fsm)
263 silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
266 /* Return associated scheduler */
/* Accessor; body (return f->schedule) elided in this extract. */
268 SilcSchedule silc_fsm_get_schedule(void *fsm)
274 /* Return thread's machine */
/* Valid only for FSM threads — asserts the thread flag, then returns the
   owning machine stored in the union's thread arm. */
276 SilcFSM silc_fsm_get_machine(SilcFSMThread thread)
278 SILC_ASSERT(thread->thread);
279 return (SilcFSM)thread->u.t.fsm;
/* Replace the user context passed to every state callback. */
284 void silc_fsm_set_context(void *fsm, void *fsm_context)
287 f->fsm_context = fsm_context;
/* Return the user context associated with this machine/thread. */
292 void *silc_fsm_get_context(void *fsm)
295 return f->fsm_context;
298 /* Set state context */
/* Per-state context: third argument delivered to state callbacks. */
300 void silc_fsm_set_state_context(void *fsm, void *state_context)
303 f->state_context = state_context;
306 /* Get state context */
/* Return the current per-state context (may be NULL). */
308 void *silc_fsm_get_state_context(void *fsm)
311 return f->state_context;
314 /* Wait for thread to terminate */
/* Implements SILC_FSM_THREAD_WAIT: allocates a zero-valued semaphore on
   the thread and suspends the calling FSM on it; the semaphore is posted
   from silc_fsm_finish when the thread ends.
   NOTE(review): the `t` local, the already-terminated fast path and the
   return value are elided from this extract. */
316 SilcBool silc_fsm_thread_wait(void *fsm, void *thread)
320 SILC_ASSERT(t->thread);
/* Value 0 means every waiter blocks until the termination post. */
324 t->u.t.sema = silc_fsm_sema_alloc(t->u.t.fsm, 0);
328 SILC_LOG_DEBUG(("Waiting for thread %p to terminate", thread));
329 silc_fsm_sema_wait(t->u.t.sema, fsm);
/* Core state dispatcher.  Runs the current state callback repeatedly
   while it returns SILC_FSM_CONTINUE, then acts on the final status
   (continue-later / wait / finish).
   NOTE(review): the `switch (status)` opener, the SILC_FSM_CONTINUE and
   SILC_FSM_WAIT case labels, the `break`s and the closing braces are
   elided from this extract — the visible lines are fragments of those
   switch arms. */
335 SILC_TASK_CALLBACK(silc_fsm_run)
337 SilcFSM fsm = context;
338 SilcFSMStatus status;
340 SILC_LOG_DEBUG(("Running %s %p", fsm->thread ? "thread" : "FSM", fsm));
/* do/while: execute chained states inline without re-entering the
   scheduler for each SILC_FSM_CONTINUE. */
344 status = fsm->next_state(fsm, fsm->fsm_context, fsm->state_context);
345 while (status == SILC_FSM_CONTINUE);
349 /* Continue through scheduler */
350 silc_fsm_continue(fsm);
354 /* The machine is in hold */
355 SILC_LOG_DEBUG(("State wait %p", fsm));
356 fsm->synchronous = FALSE;
359 case SILC_FSM_FINISH:
360 /* Finish the state machine */
361 SILC_LOG_DEBUG(("State finish %p", fsm));
362 #if defined(SILC_DEBUG)
/* A machine may finish only once. */
363 SILC_ASSERT(!fsm->finished);
364 #endif /* SILC_DEBUG */
365 fsm->finished = TRUE;
367 /* If we are thread and using real threads, the FSM thread will finish
368 after the real thread has finished, in the main thread. */
369 if (fsm->thread && fsm->real_thread) {
/* Stopping the thread-local scheduler makes silc_fsm_thread fall out
   of its silc_schedule() loop and hand finishing to the main thread. */
370 silc_schedule_stop(fsm->schedule);
374 /* Normal FSM operation */
375 if (fsm->synchronous)
376 silc_fsm_finish(fsm->schedule, app_context, 0, 0, fsm);
378 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_finish,
387 /* Finishes the FSM. This is always executed in the main thread, even
388 for FSM threads that were run in real threads. */
/* NOTE(review): the `if (fsm->thread)` / `else` structure separating the
   thread path from the machine path is elided from this extract; the
   visible lines are fragments of both branches. */
390 SILC_TASK_CALLBACK(silc_fsm_finish)
392 SilcFSM fsm = context;
394 SILC_LOG_DEBUG(("%s %p, is finished", fsm->thread ? "Thread" : "FSM", fsm));
396 fsm->next_state = NULL;
399 /* This is thread, send signal */
/* Wake anything blocked in silc_fsm_thread_wait, then release the
   termination semaphore. */
401 silc_fsm_thread_termination_post(fsm->u.t.sema);
402 silc_fsm_sema_free(fsm->u.t.sema);
403 fsm->u.t.sema = NULL;
406 /* Remove the thread from machine */
407 fsm->u.t.fsm->u.m.threads--;
409 /* Call the destructor callback only if the underlying machine is
/* (comment continues on an elided source line — presumably "...still
   running"; a finished machine frees its threads itself) */
411 if (fsm->destructor && fsm->u.t.fsm->finished == FALSE)
412 fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
/* Machine path: release the real-thread lock before destruction. */
416 silc_mutex_free(fsm->u.m.lock);
417 fsm->u.m.lock = NULL;
420 /* Call the destructor callback. */
422 fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
426 /* Allocate FSM semaphore */
/* Heap-allocates a semaphore and marks it `allocated` so that
   silc_fsm_sema_free/unref know to free it.  NOTE(review): the `sema`
   declaration, NULL check and return are elided in this extract. */
428 SilcFSMSema silc_fsm_sema_alloc(SilcFSM fsm, SilcUInt32 value)
432 sema = silc_calloc(1, sizeof(*sema));
436 silc_fsm_sema_init(sema, fsm, value);
437 sema->allocated = TRUE;
442 /* Initializes FSM semaphore */
/* Initializes a caller-provided semaphore.  Semaphores belong to
   machines, never threads (asserted below). */
444 void silc_fsm_sema_init(SilcFSMSema sema, SilcFSM fsm, SilcUInt32 value)
446 SILC_LOG_DEBUG(("Initializing semaphore %p", sema));
447 #if defined(SILC_DEBUG)
448 SILC_ASSERT(!fsm->thread);
449 #endif /* SILC_DEBUG */
450 memset(sema, 0, sizeof(*sema));
/* Waiter list links FSM objects through their `next` field. */
453 silc_list_init(sema->waiters, struct SilcFSMObject, next);
/* Free an allocated semaphore.  Refuses to free while references or
   waiters remain.  NOTE(review): the bodies of both guard branches
   (presumably early returns) and the silc_free are elided here. */
459 void silc_fsm_sema_free(SilcFSMSema sema)
461 if (sema->refcnt > 0)
463 if (silc_list_count(sema->waiters) > 0)
468 /* Reference semaphore */
/* Increments refcnt (body elided in extract); pairs with
   silc_fsm_sema_unref below. */
470 static void silc_fsm_sema_ref(SilcFSMSema sema)
475 /* Unreference semaphore */
/* Drops a reference; last unref of a heap-allocated semaphore frees it.
   NOTE(review): the refcnt decrement line is elided in this extract. */
477 static void silc_fsm_sema_unref(SilcFSMSema sema)
480 if (sema->refcnt == 0 && sema->allocated)
481 silc_fsm_sema_free(sema);
484 /* Wait until semaphore is non-zero. */
/* Lock comes from the owning machine; it exists only when real threads
   are in use (may be NULL otherwise — silc_mutex_lock(NULL) is assumed
   to be a no-op in SILC; TODO confirm).
   NOTE(review): the branch on sema->value (acquired vs. must-wait), the
   `entry` declaration and the return values are elided here. */
486 SilcUInt32 silc_fsm_sema_wait(SilcFSMSema sema, void *fsm)
488 SilcMutex lock = sema->fsm->u.m.lock;
490 silc_mutex_lock(lock);
493 #if defined(SILC_DEBUG)
/* Debug-only: this FSM must not already be queued on the semaphore. */
495 silc_list_start(sema->waiters);
496 while ((entry = silc_list_get(sema->waiters)) != SILC_LIST_END)
497 SILC_ASSERT(entry != fsm);
498 #endif /* SILC_DEBUG */
500 SILC_LOG_DEBUG(("Waiting for semaphore %p", sema));
502 /* Add the FSM to waiter list */
503 silc_list_add(sema->waiters, fsm);
504 silc_mutex_unlock(lock);
/* --- acquired path (value was non-zero) --- */
508 SILC_LOG_DEBUG(("Acquired semaphore %p", sema));
510 /* It is possible that this FSM is in the list so remove it */
511 silc_list_del(sema->waiters, fsm);
513 silc_mutex_unlock(lock);
517 /* Wait until semaphore is non-zero, or timeout occurs. */
/* Like silc_fsm_sema_wait but arms a timeout task; if the task fires
   first, silc_fsm_sema_timedout sets f->sema_timedout, which this
   function consumes on re-entry.
   NOTE(review): the `f`/`value` declarations, the ret-parameter handling
   and the returns are elided from this extract. */
519 SilcUInt32 silc_fsm_sema_timedwait(SilcFSMSema sema, void *fsm,
520 SilcUInt32 seconds, SilcUInt32 useconds,
523 SilcMutex lock = sema->fsm->u.m.lock;
527 silc_mutex_lock(lock);
/* Re-entry after the timeout fired: clear the flag and report timeout. */
529 if (f->sema_timedout) {
530 SILC_LOG_DEBUG(("Semaphore was timedout"));
531 f->sema_timedout = FALSE;
534 silc_mutex_unlock(lock);
538 silc_mutex_unlock(lock);
540 value = silc_fsm_sema_wait(sema, fsm);
/* Arm the timeout only while actually blocked (elided condition on
   `value` presumably guards this — confirm against full source). */
542 silc_schedule_task_add_timeout(f->schedule, silc_fsm_sema_timedout,
543 f, seconds, useconds);
553 /* Semaphore timedout */
/* Timeout task armed by silc_fsm_sema_timedwait.  Removes the FSM from
   the waiter list, resumes it, and flags the timeout so the re-entered
   timedwait can report it. */
555 SILC_TASK_CALLBACK(silc_fsm_sema_timedout)
557 SilcFSM fsm = context;
558 SilcMutex lock = fsm->sema->fsm->u.m.lock;
560 SILC_LOG_DEBUG(("Semaphore %p timedout", fsm->sema));
562 /* Remove the waiter from the semaphore */
563 silc_mutex_lock(lock);
564 silc_list_del(fsm->sema->waiters, fsm);
/* Resume the FSM; it will observe sema_timedout on its next wait. */
568 silc_fsm_continue(fsm);
569 fsm->sema_timedout = TRUE;
573 silc_mutex_unlock(lock);
576 /* Signalled, semaphore */
/* Deferred delivery task queued by silc_fsm_sema_post, one per waiter
   (context is a SilcFSMSemaPost).  NOTE(review): the decrement of
   sema->value, the freeing of `p`, and the early-return brace structure
   are elided from this extract. */
578 SILC_TASK_CALLBACK(silc_fsm_signal)
580 SilcFSMSemaPost p = context;
581 SilcMutex lock = p->sema->fsm->u.m.lock;
583 /* If the semaphore value has gone to zero while we've been waiting for
584 this callback, the semaphore has been signalled already. It can happen
585 when using real threads because the FSM may not be in waiting state
586 when the semaphore is posted. */
587 silc_mutex_lock(lock);
588 if (!p->sema->value) {
589 silc_mutex_unlock(lock);
/* Drop the reference taken in silc_fsm_sema_post. */
590 silc_fsm_sema_unref(p->sema);
594 silc_mutex_unlock(lock);
596 SILC_LOG_DEBUG(("Signalled %s %p", p->fsm->thread ? "thread" : "FSM",
/* Resume the waiter synchronously, then release our reference. */
600 silc_fsm_continue_sync(p->fsm);
602 silc_fsm_sema_unref(p->sema);
606 /* Increase semaphore */
/* Post (V) operation: for each queued waiter, cancel its pending timeout
   task and schedule a silc_fsm_signal delivery on the waiter's own
   scheduler.  NOTE(review): the `fsm`/`p` declarations, the increment of
   sema->value and the population of the SilcFSMSemaPost fields are
   elided from this extract. */
608 void silc_fsm_sema_post(SilcFSMSema sema)
612 SilcMutex lock = sema->fsm->u.m.lock;
614 SILC_LOG_DEBUG(("Posting semaphore %p", sema));
616 silc_mutex_lock(lock);
619 silc_list_start(sema->waiters);
620 while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
/* A post supersedes any pending timed-wait timeout for this waiter. */
622 silc_schedule_task_del_by_all(fsm->schedule, 0, silc_fsm_sema_timedout,
/* Per-waiter delivery context, freed by silc_fsm_signal. */
627 p = silc_calloc(1, sizeof(*p));
/* Keep the semaphore alive until every signal task has run. */
632 silc_fsm_sema_ref(sema);
634 /* Signal through scheduler. Wake up destination scheduler in case
635 caller is a real thread. */
636 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_signal, p, 0, 1);
637 silc_schedule_wakeup(fsm->schedule);
640 silc_mutex_unlock(lock);
643 /* Post thread termination semaphore. Special function used only to
644 signal thread termination when SILC_FSM_THREAD_WAIT was used. */
/* Unlike silc_fsm_sema_post this resumes waiters directly (no
   SilcFSMSemaPost allocation, no value bookkeeping): termination is a
   one-shot broadcast.  NOTE(review): the `fsm` declaration is elided. */
646 static void silc_fsm_thread_termination_post(SilcFSMSema sema)
649 SilcMutex lock = sema->fsm->u.m.lock;
651 SILC_LOG_DEBUG(("Post thread terminate semaphore %p", sema));
653 silc_mutex_lock(lock);
655 silc_list_start(sema->waiters);
656 while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
657 /* Signal on thread termination. Wake up destination scheduler in case
658 caller is a real thread. */
659 silc_list_del(sema->waiters, fsm);
660 silc_fsm_continue(fsm);
661 silc_schedule_wakeup(fsm->schedule);
664 silc_mutex_unlock(lock);
669 static void *silc_fsm_thread(void *context)
671 SilcFSM fsm = context;
672 SilcSchedule old = fsm->schedule;
674 SILC_LOG_DEBUG(("Starting FSM thread in real thread"));
676 /* We allocate new SilcSchedule for the FSM, as the old SilcSchedule
677 cannot be used in this thread. Application may still use it if it
678 wants but we use our own. */
679 fsm->schedule = silc_schedule_init(0, old);
683 /* Start the FSM thread */
684 if (!silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_run, fsm, 0, 1))
687 /* Run the scheduler */
688 silc_schedule(fsm->schedule);
691 silc_schedule_uninit(fsm->schedule);
695 /* Finish the FSM thread in the main thread */
696 SILC_ASSERT(fsm->finished);
697 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_finish, fsm, 0, 1);
698 silc_schedule_wakeup(fsm->schedule);