/*

  silcfsm.c

  Author: Pekka Riikonen <priikone@silcnet.org>

  Copyright (C) 2005 - 2006 Pekka Riikonen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; version 2 of the License.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

*/
22 SILC_TASK_CALLBACK(silc_fsm_run);
23 SILC_TASK_CALLBACK(silc_fsm_finish);
24 SILC_TASK_CALLBACK(silc_fsm_sema_timedout);
25 SILC_TASK_CALLBACK(silc_fsm_start_real_thread);
26 static void *silc_fsm_thread(void *context);
27 static void silc_fsm_thread_termination_post(SilcFSMSema sema);
28 static void silc_fsm_sema_ref(SilcFSMSema sema);
29 static void silc_fsm_sema_unref(SilcFSMSema sema);
33 SilcFSM silc_fsm_alloc(void *fsm_context,
34 SilcFSMDestructor destructor,
35 void *destructor_context,
36 SilcSchedule schedule)
40 fsm = silc_calloc(1, sizeof(*fsm));
44 if (!silc_fsm_init(fsm, fsm_context, destructor,
45 destructor_context, schedule)) {
55 SilcBool silc_fsm_init(SilcFSM fsm,
57 SilcFSMDestructor destructor,
58 void *destructor_context,
59 SilcSchedule schedule)
64 fsm->fsm_context = fsm_context;
65 fsm->state_context = NULL;
66 fsm->destructor = destructor;
67 fsm->destructor_context = destructor_context;
68 fsm->schedule = schedule;
70 fsm->async_call = FALSE;
77 /* Allocate FSM thread. Internally machine and thread use same context. */
79 SilcFSMThread silc_fsm_thread_alloc(SilcFSM fsm,
81 SilcFSMThreadDestructor destructor,
82 void *destructor_context,
87 thread = silc_calloc(1, sizeof(*thread));
91 silc_fsm_thread_init(thread, fsm, thread_context, destructor,
92 destructor_context, real_thread);
96 /* Initialize FSM thread. Internally machine and thread use same context. */
98 void silc_fsm_thread_init(SilcFSMThread thread,
100 void *thread_context,
101 SilcFSMThreadDestructor destructor,
102 void *destructor_context,
103 SilcBool real_thread)
105 SILC_LOG_DEBUG(("Initializing new thread %p (%s)",
106 thread, real_thread ? "real" : "FSM"));
108 #if defined(SILC_DEBUG)
109 assert(!fsm->thread);
110 #endif /* SILC_DEBUG */
112 thread->fsm_context = thread_context;
113 thread->state_context = NULL;
114 thread->destructor = (SilcFSMDestructor)destructor;
115 thread->destructor_context = destructor_context;
116 thread->schedule = fsm->schedule;
117 thread->thread = TRUE;
118 thread->async_call = FALSE;
119 thread->real_thread = real_thread;
120 thread->u.t.fsm = fsm;
125 /* Allocate lock for the machine if using real threads. */
126 if (real_thread && !fsm->u.m.lock)
127 if (!silc_mutex_alloc(&fsm->u.m.lock))
128 thread->real_thread = FALSE;
131 /* FSM is destroyed through scheduler to make sure that all dying
132 real system threads will have their finish callbacks scheduled before
133 this one (when SILC_FSM_THREAD_WAIT was used). */
135 SILC_TASK_CALLBACK(silc_fsm_free_final)
139 #if defined(SILC_DEBUG)
140 /* We must be finished */
143 /* Machine must not have active threads */
144 if (!f->thread && f->u.m.threads)
145 assert(f->u.m.threads == 0);
146 #endif /* SILC_DEBUG */
148 if (!f->thread && f->u.m.lock)
149 silc_mutex_free(f->u.m.lock);
151 if (f->thread && f->u.t.sema)
152 silc_fsm_sema_free(f->u.t.sema);
159 void silc_fsm_free(void *fsm)
162 silc_schedule_task_add_timeout(f->schedule, silc_fsm_free_final, f, 0, 1);
165 /* Task to start real thread. We start threads through scheduler, not
166 directly in silc_fsm_start. */
168 SILC_TASK_CALLBACK(silc_fsm_start_real_thread)
173 if (silc_thread_create(silc_fsm_thread, f, FALSE))
175 #endif /* SILC_THREADS */
177 SILC_LOG_DEBUG(("Could not create real thread, using normal FSM thread"));
179 /* Normal FSM operation */
180 f->real_thread = FALSE;
181 silc_fsm_continue_sync(f);
184 /* Start FSM in the specified state */
186 void silc_fsm_start(void *fsm, SilcFSMStateCallback start_state)
190 SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
193 f->next_state = start_state;
194 f->synchronous = FALSE;
196 /* Start real thread through scheduler */
197 if (f->thread && f->real_thread) {
198 silc_schedule_task_add_timeout(f->schedule, silc_fsm_start_real_thread,
203 /* Normal FSM operation */
204 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 1);
207 /* Start FSM in the specified state synchronously */
209 void silc_fsm_start_sync(void *fsm, SilcFSMStateCallback start_state)
213 SILC_LOG_DEBUG(("Starting %s %p", f->thread ? "thread" : "FSM", fsm));
216 f->next_state = start_state;
217 f->synchronous = TRUE;
219 /* Start real thread directly */
220 if (f->thread && f->real_thread) {
221 silc_fsm_start_real_thread(f->schedule,
222 silc_schedule_get_context(f->schedule),
227 /* Normal FSM operation */
228 silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
231 /* Set next FSM state */
233 void silc_fsm_next(void *fsm, SilcFSMStateCallback next_state)
236 f->next_state = next_state;
239 /* Continue after timeout */
241 void silc_fsm_next_later(void *fsm, SilcFSMStateCallback next_state,
242 SilcUInt32 seconds, SilcUInt32 useconds)
245 f->next_state = next_state;
246 if (!seconds && !useconds)
248 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f,
252 /* Continue after callback or async operation */
254 void silc_fsm_continue(void *fsm)
257 silc_schedule_task_add_timeout(f->schedule, silc_fsm_run, f, 0, 1);
260 /* Continue after callback or async operation immediately */
262 void silc_fsm_continue_sync(void *fsm)
265 silc_fsm_run(f->schedule, silc_schedule_get_context(f->schedule), 0, 0, f);
268 /* Return associated scheduler */
270 SilcSchedule silc_fsm_get_schedule(void *fsm)
276 /* Return thread's machine */
278 SilcFSM silc_fsm_get_machine(SilcFSMThread thread)
280 assert(thread->thread);
281 return (SilcFSM)thread->u.t.fsm;
286 void silc_fsm_set_context(void *fsm, void *fsm_context)
289 f->fsm_context = fsm_context;
294 void *silc_fsm_get_context(void *fsm)
297 return f->fsm_context;
300 /* Set state context */
302 void silc_fsm_set_state_context(void *fsm, void *state_context)
305 f->state_context = state_context;
308 /* Get state context */
310 void *silc_fsm_get_state_context(void *fsm)
313 return f->state_context;
316 /* Wait for thread to terminate */
318 SilcBool silc_fsm_thread_wait(void *fsm, void *thread)
321 #if defined(SILC_DEBUG)
323 #endif /* SILC_DEBUG */
324 t->u.t.sema = silc_fsm_sema_alloc(t->u.t.fsm, 0);
327 silc_fsm_sema_wait(t->u.t.sema, fsm);
333 SILC_TASK_CALLBACK(silc_fsm_run)
335 SilcFSM fsm = context;
336 SilcFSMStatus status;
338 SILC_LOG_DEBUG(("Running %s %p", fsm->thread ? "thread" : "FSM", fsm));
342 status = fsm->next_state(fsm, fsm->fsm_context, fsm->state_context);
343 while (status == SILC_FSM_CONTINUE);
347 /* The machine is in hold */
348 SILC_LOG_DEBUG(("State wait %p", fsm));
349 fsm->synchronous = FALSE;
352 case SILC_FSM_FINISH:
353 /* Finish the state machine */
354 SILC_LOG_DEBUG(("State finish %p", fsm));
355 #if defined(SILC_DEBUG)
356 assert(!fsm->finished);
357 #endif /* SILC_DEBUG */
358 fsm->finished = TRUE;
360 /* If we are thread and using real threads, the FSM thread will finish
361 in the main thread, not in the created thread. */
362 if (fsm->thread && fsm->real_thread) {
363 silc_schedule_task_add_timeout(app_context, silc_fsm_finish, fsm, 0, 1);
364 silc_schedule_wakeup(app_context);
365 silc_schedule_stop(fsm->schedule);
369 /* Normal FSM operation */
370 if (fsm->synchronous)
371 silc_fsm_finish(fsm->schedule, app_context, 0, 0, fsm);
373 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_finish,
382 /* Finishes the FSM. This is always executed in the main thread, even
383 for FSM threads that were run in real threads. */
385 SILC_TASK_CALLBACK(silc_fsm_finish)
387 SilcFSM fsm = context;
389 SILC_LOG_DEBUG(("%s %p, is finished", fsm->thread ? "Thread" : "FSM", fsm));
391 fsm->next_state = NULL;
394 /* This is thread, send signal */
396 silc_fsm_thread_termination_post(fsm->u.t.sema);
397 silc_fsm_sema_free(fsm->u.t.sema);
398 fsm->u.t.sema = NULL;
401 /* Remove the thread from machine */
402 fsm->u.t.fsm->u.m.threads--;
404 /* Call the destructor callback only if the underlaying machine is
406 if (fsm->destructor && fsm->u.t.fsm->finished == FALSE)
407 fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
411 silc_mutex_free(fsm->u.m.lock);
412 fsm->u.m.lock = NULL;
415 /* Call the destructor callback. */
417 fsm->destructor(fsm, fsm->fsm_context, fsm->destructor_context);
421 /* Allocate FSM semaphore */
423 SilcFSMSema silc_fsm_sema_alloc(SilcFSM fsm, SilcUInt32 value)
427 sema = silc_calloc(1, sizeof(*sema));
431 silc_fsm_sema_init(sema, fsm, value);
432 sema->allocated = TRUE;
437 /* Initializes FSM semaphore */
439 void silc_fsm_sema_init(SilcFSMSema sema, SilcFSM fsm, SilcUInt32 value)
441 SILC_LOG_DEBUG(("Initializing semaphore %p", sema));
442 #if defined(SILC_DEBUG)
443 assert(!fsm->thread);
444 #endif /* SILC_DEBUG */
445 memset(sema, 0, sizeof(*sema));
448 silc_list_init(sema->waiters, struct SilcFSMObject, next);
454 void silc_fsm_sema_free(SilcFSMSema sema)
456 if (sema->refcnt > 0)
458 #if defined(SILC_DEBUG)
459 assert(silc_list_count(sema->waiters) == 0);
460 #endif /* SILC_DEBUG */
464 /* Reference semaphore */
466 static void silc_fsm_sema_ref(SilcFSMSema sema)
471 /* Unreference semaphore */
473 static void silc_fsm_sema_unref(SilcFSMSema sema)
476 if (sema->refcnt == 0 && sema->allocated)
477 silc_fsm_sema_free(sema);
480 /* Wait until semaphore is non-zero. */
482 SilcUInt32 silc_fsm_sema_wait(SilcFSMSema sema, void *fsm)
484 SilcMutex lock = sema->fsm->u.m.lock;
486 silc_mutex_lock(lock);
489 #if defined(SILC_DEBUG)
491 silc_list_start(sema->waiters);
492 while ((entry = silc_list_get(sema->waiters)) != SILC_LIST_END)
493 assert(entry != fsm);
494 #endif /* SILC_DEBUG */
496 SILC_LOG_DEBUG(("Waiting for semaphore %p", sema));
498 /* Add the FSM to waiter list */
499 silc_list_add(sema->waiters, fsm);
500 silc_mutex_unlock(lock);
504 SILC_LOG_DEBUG(("Acquired semaphore %p", sema));
506 /* It is possible that this FSM is in the list so remove it */
507 silc_list_del(sema->waiters, fsm);
509 silc_mutex_unlock(lock);
513 /* Wait util semaphore is non-zero, or timeout occurs. */
515 SilcUInt32 silc_fsm_sema_timedwait(SilcFSMSema sema, void *fsm,
516 SilcUInt32 seconds, SilcUInt32 useconds,
519 SilcMutex lock = sema->fsm->u.m.lock;
523 silc_mutex_lock(lock);
525 if (f->sema_timedout) {
526 SILC_LOG_DEBUG(("Semaphore was timedout"));
527 f->sema_timedout = FALSE;
530 silc_mutex_unlock(lock);
534 silc_mutex_unlock(lock);
536 value = silc_fsm_sema_wait(sema, fsm);
538 silc_schedule_task_add_timeout(f->schedule, silc_fsm_sema_timedout,
539 f, seconds, useconds);
549 /* Semaphore timedout */
551 SILC_TASK_CALLBACK(silc_fsm_sema_timedout)
553 SilcFSM fsm = context;
554 SilcMutex lock = fsm->sema->fsm->u.m.lock;
556 SILC_LOG_DEBUG(("Semaphore %p timedout", fsm->sema));
558 /* Remove the waiter from the semaphore */
559 silc_mutex_lock(lock);
560 silc_list_del(fsm->sema->waiters, fsm);
564 silc_fsm_continue(fsm);
565 fsm->sema_timedout = TRUE;
569 silc_mutex_unlock(lock);
572 /* Signalled, semaphore */
574 SILC_TASK_CALLBACK(silc_fsm_signal)
576 SilcFSMSemaPost p = context;
577 SilcMutex lock = p->sema->fsm->u.m.lock;
579 /* If the semaphore value has went to zero while we've been waiting this
580 callback, sempahore has been been signalled already. It can happen
581 when using real threads because the FSM may not be waiting state when
582 the sempahore is posted. */
583 silc_mutex_lock(lock);
584 if (!p->sema->value) {
585 silc_mutex_unlock(lock);
586 silc_fsm_sema_unref(p->sema);
590 silc_mutex_unlock(lock);
592 SILC_LOG_DEBUG(("Signalled %s %p", p->fsm->thread ? "thread" : "FSM",
596 silc_fsm_continue_sync(p->fsm);
598 silc_fsm_sema_unref(p->sema);
602 /* Increase semaphore */
604 void silc_fsm_sema_post(SilcFSMSema sema)
608 SilcMutex lock = sema->fsm->u.m.lock;
610 SILC_LOG_DEBUG(("Posting semaphore %p", sema));
612 silc_mutex_lock(lock);
615 silc_list_start(sema->waiters);
616 while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
618 silc_schedule_task_del_by_all(fsm->schedule, 0, silc_fsm_sema_timedout,
623 p = silc_calloc(1, sizeof(*p));
628 silc_fsm_sema_ref(sema);
630 /* Signal through scheduler. Wake up destination scheduler in case
631 caller is a real thread. */
632 silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_signal, p, 0, 1);
633 silc_schedule_wakeup(fsm->schedule);
636 silc_mutex_unlock(lock);
639 /* Post thread termination semaphore. Special function used only to
640 signal thread termination when SILC_FSM_THREAD_WAIT was used. */
642 static void silc_fsm_thread_termination_post(SilcFSMSema sema)
645 SilcMutex lock = sema->fsm->u.m.lock;
647 SILC_LOG_DEBUG(("Post thread termination semaphore %p", sema));
649 silc_mutex_lock(lock);
651 silc_list_start(sema->waiters);
652 while ((fsm = silc_list_get(sema->waiters)) != SILC_LIST_END) {
653 /* Signal on thread termination. Wake up destination scheduler in case
654 caller is a real thread. */
655 silc_list_del(sema->waiters, fsm);
656 silc_fsm_continue(fsm);
657 silc_schedule_wakeup(fsm->schedule);
660 silc_mutex_unlock(lock);
665 static void *silc_fsm_thread(void *context)
667 SilcFSM fsm = context;
668 SilcSchedule old = fsm->schedule;
670 SILC_LOG_DEBUG(("Starting FSM thread in real thread"));
672 /* We allocate new SilcSchedule for the FSM, as the old SilcSchedule
673 cannot be used in this thread. Application may still use it if it
674 wants but we use our own. */
675 fsm->schedule = silc_schedule_init(0, old);
679 /* Start the FSM thread */
680 if (!silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_run, fsm, 0, 1))
683 /* Run the scheduler */
684 silc_schedule(fsm->schedule);
687 silc_schedule_uninit(fsm->schedule);