5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 2005 - 2006 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
22 SILC_TASK_CALLBACK(silc_fsm_run);
23 SILC_TASK_CALLBACK(silc_fsm_finish);
24 SILC_TASK_CALLBACK(silc_fsm_sema_timedout);
25 SILC_TASK_CALLBACK(silc_fsm_start_real_thread);
26 static void *silc_fsm_thread(void *context);
27 static void silc_fsm_thread_termination_post(SilcFSMSema sema);
28 static void silc_fsm_sema_ref(SilcFSMSema sema);
29 static void silc_fsm_sema_unref(SilcFSMSema sema);
/* Allocate a new FSM machine context.  `fsm_context' is delivered to every
   state callback; `destructor' (with `destructor_context') runs when the
   machine finishes.  Zero-allocates the context and delegates field setup
   to silc_fsm_init.
   NOTE(review): this listing is elided — the calloc-failure check and the
   body of the init-failure branch are not visible here; confirm against
   the full source. */
33 SilcFSM silc_fsm_alloc(void *fsm_context,
34 SilcFSMDestructor destructor,
35 void *destructor_context,
36 SilcSchedule schedule)
40 fsm = silc_calloc(1, sizeof(*fsm));
44 if (!silc_fsm_init(fsm, fsm_context, destructor,
45 destructor_context, schedule)) {
/* Initialize a caller-allocated FSM context (counterpart of
   silc_fsm_alloc for pre-allocated memory).  Stores the user context,
   destructor and scheduler; the state context starts out NULL and the
   machine is marked as not being inside an async call.
   NOTE(review): the return-value computation is elided in this listing. */
55 SilcBool silc_fsm_init(SilcFSM fsm,
57 SilcFSMDestructor destructor,
58 void *destructor_context,
59 SilcSchedule schedule)
64 fsm->fsm_context = fsm_context;
65 fsm->state_context = NULL;
66 fsm->destructor = destructor;
67 fsm->destructor_context = destructor_context;
68 fsm->schedule = schedule;
70 fsm->async_call = FALSE;
/* Allocate FSM thread. Internally machine and thread use same context. */

/* Allocates a thread context belonging to machine `fsm', then initializes
   it via silc_fsm_thread_init.
   NOTE(review): the calloc-failure check and return statement are elided
   in this listing. */
79 SilcFSMThread silc_fsm_thread_alloc(SilcFSM fsm,
81 SilcFSMThreadDestructor destructor,
82 void *destructor_context,
87 thread = silc_calloc(1, sizeof(*thread));
91 silc_fsm_thread_init(thread, fsm, thread_context, destructor,
92 destructor_context, real_thread);
/* Initialize FSM thread. Internally machine and thread use same context. */

/* Sets up a thread context: it shares the parent machine's scheduler,
   is flagged with thread = TRUE, and keeps a back-pointer to the machine
   in u.t.fsm.  `real_thread' requests execution in a real system thread. */
98 void silc_fsm_thread_init(SilcFSMThread thread,
100 void *thread_context,
101 SilcFSMThreadDestructor destructor,
102 void *destructor_context,
103 SilcBool real_thread)
105 SILC_LOG_DEBUG(("Initializing new thread %p (%s)",
106 thread, real_thread ? "real" : "FSM"));
108 #if defined(SILC_DEBUG)
/* A thread cannot be created from within another thread context. */
109 SILC_ASSERT(!fsm->thread);
110 #endif /* SILC_DEBUG */
112 thread->fsm_context = thread_context;
113 thread->state_context = NULL;
114 thread->destructor = (SilcFSMDestructor)destructor;
115 thread->destructor_context = destructor_context;
116 thread->schedule = fsm->schedule;
117 thread->thread = TRUE;
118 thread->async_call = FALSE;
119 thread->real_thread = real_thread;
120 thread->u.t.fsm = fsm;
125 /* Allocate lock for the machine if using real threads. */
/* If the mutex cannot be allocated, silently fall back to a normal
   (scheduler-driven) FSM thread instead of a real system thread. */
126 if (real_thread && !fsm->u.m.lock)
127 if (!silc_mutex_alloc(&fsm->u.m.lock))
128 thread->real_thread = FALSE;
/* FSM is destroyed through scheduler to make sure that all dying
   real system threads will have their finish callbacks scheduled before
   this one (when SILC_FSM_THREAD_WAIT was used). */

/* Final destructor task: releases the machine's mutex (machines only)
   and the thread-termination semaphore (threads only), then frees the
   context.  Scheduled from silc_fsm_free.
   NOTE(review): the actual silc_free of `f' is elided in this listing. */
135 SILC_TASK_CALLBACK(silc_fsm_free_final)
139 #if defined(SILC_DEBUG)
140 /* We must be finished */
141 SILC_ASSERT(f->finished);
143 /* Machine must not have active threads */
144 if (!f->thread && f->u.m.threads)
145 SILC_ASSERT(f->u.m.threads == 0);
146 #endif /* SILC_DEBUG */
148 if (!f->thread && f->u.m.lock)
149 silc_mutex_free(f->u.m.lock);
151 if (f->thread && f->u.t.sema)
152 silc_fsm_sema_free(f->u.t.sema);
/* Free the FSM.  Destruction is deferred through the scheduler (small
   timeout task) so that finish callbacks of dying real threads are
   delivered first — see the comment above silc_fsm_free_final. */
159 void silc_fsm_free(void *fsm)
162 silc_schedule_task_add_timeout(f->schedule, silc_fsm_free_final, f, 0, 1);
/* Task to start real thread. We start threads through scheduler, not
   directly in silc_fsm_start. */

/* Attempts to spawn a real system thread running silc_fsm_thread.  If
   thread creation fails (or SILC_THREADS is not compiled in), degrades
   gracefully to normal scheduler-driven FSM operation. */
168 SILC_TASK_CALLBACK(silc_fsm_start_real_thread)
173 if (silc_thread_create(silc_fsm_thread, f, FALSE))
175 #endif /* SILC_THREADS */
177 SILC_LOG_DEBUG(("Could not create real thread, using normal FSM thread"));
179 /* Normal FSM operation */
180 f->real_thread = FALSE;
181 silc_fsm_continue_sync(f);
/* Start FSM in the specified state */

/* Asynchronous start: records `start_state' as the next state and queues
   execution through the scheduler.  Real threads are launched via the
   silc_fsm_start_real_thread task; otherwise silc_fsm_run is scheduled. */
186 void silc_fsm_start(void *fsm, SilcFSMStateCallback start_state)
190 SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
193 f->next_state = start_state;
194 f->synchronous = FALSE;
196 /* Start real thread through scheduler */
197 if (f->thread && f->real_thread) {
198 silc_schedule_task_add_timeout(f->schedule, silc_fsm_start_real_thread,
203 /* Normal FSM operation */
204 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 1);
/* Start FSM in the specified state synchronously */

/* Synchronous start: same as silc_fsm_start but executes the first state
   immediately in the caller's stack instead of deferring to a scheduled
   task (synchronous flag is TRUE). */
209 void silc_fsm_start_sync(void *fsm, SilcFSMStateCallback start_state)
213 SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
216 f->next_state = start_state;
217 f->synchronous = TRUE;
219 /* Start real thread directly */
220 if (f->thread && f->real_thread) {
221 silc_fsm_start_real_thread(f->schedule,
222 silc_schedule_get_context(f->schedule),
227 /* Normal FSM operation */
228 silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
/* Set next FSM state */

/* Only records the next state callback; the transition happens when the
   current state returns (see the run loop in silc_fsm_run). */
233 void silc_fsm_next(void *fsm, SilcFSMStateCallback next_state)
236 f->next_state = next_state;
/* Continue after timeout */

/* Sets the next state and schedules silc_fsm_run after the given timeout.
   NOTE(review): the zero-timeout branch body at the `if' below is elided
   in this listing; confirm its behavior in the full source. */
241 void silc_fsm_next_later(void *fsm, SilcFSMStateCallback next_state,
242 SilcUInt32 seconds, SilcUInt32 useconds)
245 f->next_state = next_state;
246 if (!seconds && !useconds)
248 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f,
/* Continue after callback or async operation */

/* Resumes the machine asynchronously by scheduling silc_fsm_run as a
   near-immediate (0s, 1us) timeout task. */
254 void silc_fsm_continue(void *fsm)
257 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 1);
/* Continue after callback or async operation immediately */

/* Resumes the machine synchronously: silc_fsm_run is invoked directly in
   the caller's stack, not via the scheduler. */
262 void silc_fsm_continue_sync(void *fsm)
265 silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
/* Return associated scheduler */

/* Accessor for the machine's SilcSchedule.
   NOTE(review): the function body (return statement) is elided in this
   listing. */
270 SilcSchedule silc_fsm_get_schedule(void *fsm)
/* Return thread's machine */

/* Returns the parent machine of an FSM thread; asserts the argument is
   actually a thread context (machines have no u.t.fsm back-pointer). */
278 SilcFSM silc_fsm_get_machine(SilcFSMThread thread)
280 SILC_ASSERT(thread->thread);
281 return (SilcFSM)thread->u.t.fsm;
/* Replace the user context that is passed to every state callback. */
286 void silc_fsm_set_context(void *fsm, void *fsm_context)
289 f->fsm_context = fsm_context;
/* Return the user context given at allocation/initialization time. */
294 void *silc_fsm_get_context(void *fsm)
297 return f->fsm_context;
/* Set state context */

/* Sets the per-state context delivered as the third argument to state
   callbacks (see silc_fsm_run's next_state invocation). */
302 void silc_fsm_set_state_context(void *fsm, void *state_context)
305 f->state_context = state_context;
/* Get state context */

/* Returns the current per-state context (NULL until explicitly set). */
310 void *silc_fsm_get_state_context(void *fsm)
313 return f->state_context;
/* Wait for thread to terminate */

/* Makes `fsm' wait on a termination semaphore owned by thread `t'.  The
   semaphore is allocated on the parent machine (t->u.t.fsm) with initial
   value 0 and is posted from silc_fsm_finish via
   silc_fsm_thread_termination_post.
   NOTE(review): the guard against double-wait and the return value are
   elided in this listing. */
318 SilcBool silc_fsm_thread_wait(void *fsm, void *thread)
322 #if defined(SILC_DEBUG)
323 SILC_ASSERT(t->thread);
324 #endif /* SILC_DEBUG */
328 t->u.t.sema = silc_fsm_sema_alloc(t->u.t.fsm, 0);
332 SILC_LOG_DEBUG(("Waiting for thread %p to terminate", thread));
333 silc_fsm_sema_wait(t->u.t.sema, fsm);
/* The FSM execution engine.  Calls the current state callback in a loop
   while it returns SILC_FSM_CONTINUE, then dispatches on the final
   status: yield back through the scheduler, hold (wait), or finish.
   NOTE(review): this listing is elided — the enclosing do/switch
   structure and some case labels/breaks are not fully visible. */
339 SILC_TASK_CALLBACK(silc_fsm_run)
341 SilcFSM fsm = context;
342 SilcFSMStatus status;
344 SILC_LOG_DEBUG(("Running %s %p", fsm->thread ? "thread" : "FSM", fsm));
/* Run states synchronously as long as they return SILC_FSM_CONTINUE. */
348 status = fsm->next_state(fsm, fsm->fsm_context, fsm->state_context);
349 while (status == SILC_FSM_CONTINUE);
353 /* Continue through scheduler */
354 silc_fsm_continue(fsm);
358 /* The machine is in hold */
359 SILC_LOG_DEBUG(("State wait %p", fsm));
360 fsm->synchronous = FALSE;
363 case SILC_FSM_FINISH:
364 /* Finish the state machine */
365 SILC_LOG_DEBUG(("State finish %p", fsm));
366 #if defined(SILC_DEBUG)
367 SILC_ASSERT(!fsm->finished);
368 #endif /* SILC_DEBUG */
369 fsm->finished = TRUE;
371 /* If we are thread and using real threads, the FSM thread will finish
372 in the main thread, not in the created thread. */
373 if (fsm->thread && fsm->real_thread) {
/* Stop this thread's private scheduler and hand the finish task to the
   main thread's scheduler, waking it up so it runs promptly. */
374 silc_schedule_stop(fsm->schedule);
375 silc_schedule_task_add_timeout(app_context, silc_fsm_finish, fsm, 0, 1);
376 silc_schedule_wakeup(app_context);
380 /* Normal FSM operation */
381 if (fsm->synchronous)
382 silc_fsm_finish(fsm->schedule, app_context, 0, 0, fsm);
384 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_finish,
/* Finishes the FSM. This is always executed in the main thread, even
   for FSM threads that were run in real threads. */

/* For threads: signals the termination semaphore (if any), decrements
   the machine's thread counter, and calls the thread destructor — but
   only while the parent machine itself has not finished.  For machines:
   releases the lock and calls the destructor.
   NOTE(review): the thread/machine branch structure is partly elided in
   this listing. */
396 SILC_TASK_CALLBACK(silc_fsm_finish)
398 SilcFSM fsm = context;
400 SILC_LOG_DEBUG(("%s %p, is finished", fsm->thread ? "Thread" : "FSM", fsm));
402 fsm->next_state = NULL;
405 /* This is thread, send signal */
407 silc_fsm_thread_termination_post(fsm->u.t.sema);
408 silc_fsm_sema_free(fsm->u.t.sema);
409 fsm->u.t.sema = NULL;
412 /* Remove the thread from machine */
413 fsm->u.t.fsm->u.m.threads--;
415 /* Call the destructor callback only if the underlaying machine is
417 if (fsm->destructor && fsm->u.t.fsm->finished == FALSE)
418 fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
422 silc_mutex_free(fsm->u.m.lock);
423 fsm->u.m.lock = NULL;
426 /* Call the destructor callback. */
428 fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
/* Allocate FSM semaphore */

/* Allocates and initializes a semaphore with the given initial `value';
   `allocated' = TRUE lets silc_fsm_sema_unref free it automatically when
   the last reference is dropped.
   NOTE(review): the calloc-failure check and return are elided in this
   listing. */
434 SilcFSMSema silc_fsm_sema_alloc(SilcFSM fsm, SilcUInt32 value)
438 sema = silc_calloc(1, sizeof(*sema));
442 silc_fsm_sema_init(sema, fsm, value);
443 sema->allocated = TRUE;
/* Initializes FSM semaphore */

/* Zeroes the semaphore and initializes its waiter list.  Semaphores are
   always owned by a machine, never a thread (asserted in debug builds). */
450 void silc_fsm_sema_init(SilcFSMSema sema, SilcFSM fsm, SilcUInt32 value)
452 SILC_LOG_DEBUG(("Initializing semaphore %p", sema));
453 #if defined(SILC_DEBUG)
454 SILC_ASSERT(!fsm->thread);
455 #endif /* SILC_DEBUG */
456 memset(sema, 0, sizeof(*sema));
459 silc_list_init(sema->waiters, struct SilcFSMObject, next);
/* Free the semaphore.  Refuses (the guarded branches below are elided in
   this listing) while references or waiters remain — presumably returning
   early in those cases; TODO confirm against the full source. */
465 void silc_fsm_sema_free(SilcFSMSema sema)
467 if (sema->refcnt > 0)
469 if (silc_list_count(sema->waiters) > 0)
/* Reference semaphore */

/* Takes a reference (counted in sema->refcnt; the increment itself is
   elided in this listing).  Paired with silc_fsm_sema_unref. */
476 static void silc_fsm_sema_ref(SilcFSMSema sema)
/* Unreference semaphore */

/* Drops a reference; when the count reaches zero an allocated semaphore
   (silc_fsm_sema_alloc) is freed automatically. */
483 static void silc_fsm_sema_unref(SilcFSMSema sema)
486 if (sema->refcnt == 0 && sema->allocated)
487 silc_fsm_sema_free(sema);
/* Wait until semaphore is non-zero. */

/* Under the machine's lock: if the value is zero, adds `fsm' to the
   waiter list and returns (the caller's state is resumed later by
   silc_fsm_sema_post); otherwise the semaphore is acquired and the FSM
   is removed from the waiter list in case it was still on it.
   NOTE(review): the value check, decrement and return values are elided
   in this listing. */
492 SilcUInt32 silc_fsm_sema_wait(SilcFSMSema sema, void *fsm)
494 SilcMutex lock = sema->fsm->u.m.lock;
496 silc_mutex_lock(lock);
499 #if defined(SILC_DEBUG)
/* Debug-only: ensure the same FSM is not already waiting on this
   semaphore. */
501 silc_list_start(sema->waiters);
502 while ((entry = silc_list_get(sema->waiters)) != SILC_LIST_END)
503 SILC_ASSERT(entry != fsm);
504 #endif /* SILC_DEBUG */
506 SILC_LOG_DEBUG(("Waiting for semaphore %p", sema));
508 /* Add the FSM to waiter list */
509 silc_list_add(sema->waiters, fsm);
510 silc_mutex_unlock(lock);
514 SILC_LOG_DEBUG(("Acquired semaphore %p", sema));
516 /* It is possible that this FSM is in the list so remove it */
517 silc_list_del(sema->waiters, fsm);
519 silc_mutex_unlock(lock);
/* Wait util semaphore is non-zero, or timeout occurs. */

/* Like silc_fsm_sema_wait but also arms a timeout task
   (silc_fsm_sema_timedout).  If a previous wait already timed out the
   sema_timedout flag is consumed and the wait is not re-entered.
   NOTE(review): return paths and the condition around the timeout-task
   scheduling are elided in this listing. */
525 SilcUInt32 silc_fsm_sema_timedwait(SilcFSMSema sema, void *fsm,
526 SilcUInt32 seconds, SilcUInt32 useconds,
529 SilcMutex lock = sema->fsm->u.m.lock;
533 silc_mutex_lock(lock);
535 if (f->sema_timedout) {
536 SILC_LOG_DEBUG(("Semaphore was timedout"));
537 f->sema_timedout = FALSE;
540 silc_mutex_unlock(lock);
544 silc_mutex_unlock(lock);
546 value = silc_fsm_sema_wait(sema, fsm);
548 silc_schedule_task_add_timeout(f->schedule, silc_fsm_sema_timedout,
549 f, seconds, useconds);
/* Semaphore timedout */

/* Timeout task armed by silc_fsm_sema_timedwait: removes the waiter from
   the semaphore's list under the machine lock, marks the FSM as timed
   out and resumes it through the scheduler. */
561 SILC_TASK_CALLBACK(silc_fsm_sema_timedout)
563 SilcFSM fsm = context;
564 SilcMutex lock = fsm->sema->fsm->u.m.lock;
566 SILC_LOG_DEBUG(("Semaphore %p timedout", fsm->sema));
568 /* Remove the waiter from the semaphore */
569 silc_mutex_lock(lock);
570 silc_list_del(fsm->sema->waiters, fsm);
574 silc_fsm_continue(fsm);
575 fsm->sema_timedout = TRUE;
579 silc_mutex_unlock(lock);
/* Signalled, semaphore */

/* Deferred delivery task scheduled by silc_fsm_sema_post.  If the
   semaphore value dropped back to zero before this task ran (possible
   with real threads), the signal is considered already consumed and only
   the reference taken at post time is released; otherwise the waiting
   FSM is resumed synchronously.
   NOTE(review): freeing of the SilcFSMSemaPost context is elided in this
   listing. */
584 SILC_TASK_CALLBACK(silc_fsm_signal)
586 SilcFSMSemaPost p = context;
587 SilcMutex lock = p->sema->fsm->u.m.lock;
589 /* If the semaphore value has went to zero while we've been waiting this
590 callback, sempahore has been been signalled already. It can happen
591 when using real threads because the FSM may not be waiting state when
592 the sempahore is posted. */
593 silc_mutex_lock(lock);
594 if (!p->sema->value) {
595 silc_mutex_unlock(lock);
596 silc_fsm_sema_unref(p->sema);
600 silc_mutex_unlock(lock);
602 SILC_LOG_DEBUG(("Signalled %s %p", p->fsm->thread ? "thread" : "FSM",
606 silc_fsm_continue_sync(p->fsm);
608 silc_fsm_sema_unref(p->sema);
/* Increase semaphore */

/* Posts the semaphore: for each waiter, cancels any pending timeout task,
   builds a SilcFSMSemaPost context, takes a semaphore reference, and
   schedules silc_fsm_signal on the waiter's scheduler — waking that
   scheduler up in case the poster is a real thread.
   NOTE(review): the value increment and per-waiter loop boundaries are
   partly elided in this listing. */
614 void silc_fsm_sema_post(SilcFSMSema sema)
618 SilcMutex lock = sema->fsm->u.m.lock;
620 SILC_LOG_DEBUG(("Posting semaphore %p", sema));
622 silc_mutex_lock(lock);
625 silc_list_start(sema->waiters);
626 while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
/* Cancel a pending silc_fsm_sema_timedout for this waiter, if any. */
628 silc_schedule_task_del_by_all(fsm->schedule, 0, silc_fsm_sema_timedout,
633 p = silc_calloc(1, sizeof(*p));
638 silc_fsm_sema_ref(sema);
640 /* Signal through scheduler. Wake up destination scheduler in case
641 caller is a real thread. */
642 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_signal, p, 0, 1);
643 silc_schedule_wakeup(fsm->schedule);
646 silc_mutex_unlock(lock);
/* Post thread termination semaphore. Special function used only to
   signal thread termination when SILC_FSM_THREAD_WAIT was used. */

/* Unlike silc_fsm_sema_post, this resumes each waiter directly (no
   SilcFSMSemaPost indirection): removes it from the waiter list and
   schedules its continuation, waking its scheduler in case the caller
   is a real thread. */
652 static void silc_fsm_thread_termination_post(SilcFSMSema sema)
655 SilcMutex lock = sema->fsm->u.m.lock;
657 SILC_LOG_DEBUG(("Post thread terminate semaphore %p", sema));
659 silc_mutex_lock(lock);
661 silc_list_start(sema->waiters);
662 while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
663 /* Signal on thread termination. Wake up destination scheduler in case
664 caller is a real thread. */
665 silc_list_del(sema->waiters, fsm);
666 silc_fsm_continue(fsm);
667 silc_schedule_wakeup(fsm->schedule);
670 silc_mutex_unlock(lock);
675 static void *silc_fsm_thread(void *context)
677 SilcFSM fsm = context;
678 SilcSchedule old = fsm->schedule;
680 SILC_LOG_DEBUG(("Starting FSM thread in real thread"));
682 /* We allocate new SilcSchedule for the FSM, as the old SilcSchedule
683 cannot be used in this thread. Application may still use it if it
684 wants but we use our own. */
685 fsm->schedule = silc_schedule_init(0, old);
689 /* Start the FSM thread */
690 if (!silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_run, fsm, 0, 1))
693 /* Run the scheduler */
694 silc_schedule(fsm->schedule);
697 silc_schedule_uninit(fsm->schedule);