Merged from silc_1_0_branch.
diff --git a/lib/silcutil/silcschedule.c b/lib/silcutil/silcschedule.c
index fd53ffe237def1d36cb0e1761cbf3514a161a027..e0f658b08f52807e42fa93107a14f107155bbf59 100644
--- a/lib/silcutil/silcschedule.c
+++ b/lib/silcutil/silcschedule.c
@@ -1,16 +1,15 @@
 /*
 
-  silcschedule.c
+  silcschedule.c 
 
   Author: Pekka Riikonen <priikone@silcnet.org>
 
-  Copyright (C) 1998 - 2001 Pekka Riikonen
+  Copyright (C) 1998 - 2002 Pekka Riikonen
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 2 of the License, or
-  (at your option) any later version.
-  
+  the Free Software Foundation; version 2 of the License.
+
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 /* Forward declarations */
 typedef struct SilcTaskQueueStruct *SilcTaskQueue;
 
-/* System specific routines. Implemented under unix/ and win32/. */
+/* System specific routines. Implemented under unix/, win32/ and such. */
 
 /* System specific select(). Returns same values as normal select(). */
-int silc_select(SilcScheduleFd fds, uint32 fds_count, struct timeval *timeout);
+int silc_select(SilcScheduleFd fds, SilcUInt32 fds_count, 
+               struct timeval *timeout);
 
-/* Initializes the wakeup of the scheduler. In multi-threaded environment
+/* Initializes the platform specific scheduler.  This initializes, for example,
+   the wakeup mechanism of the scheduler.  In a multi-threaded environment
    the scheduler needs to be woken up when tasks are added or removed from
-   the task queues. This will initialize the wakeup for the scheduler.
-   Any tasks that needs to be registered must be registered to the `queue'.
-   It is guaranteed that the scheduler will automatically free any
-   registered tasks in this queue. This is system specific routine. */
-void *silc_schedule_wakeup_init(SilcSchedule schedule);
+   the task queues.  Returns the platform specific scheduler context. */
+void *silc_schedule_internal_init(SilcSchedule schedule, void *context);
 
-/* Uninitializes the system specific wakeup. */
-void silc_schedule_wakeup_uninit(void *context);
+/* Uninitializes the platform specific scheduler context. */
+void silc_schedule_internal_uninit(void *context);
 
 /* Wakes up the scheduler. This is a platform specific routine. */
-void silc_schedule_wakeup_internal(void *context);
+void silc_schedule_internal_wakeup(void *context);
+
+/* Register signal */
+void silc_schedule_internal_signal_register(void *context,
+                                            SilcUInt32 signal,
+                                            SilcTaskCallback callback,
+                                            void *callback_context);
+
+/* Unregister signal */
+void silc_schedule_internal_signal_unregister(void *context,
+                                              SilcUInt32 signal,
+                                              SilcTaskCallback callback,
+                                              void *callback_context);
+
+/* Mark signal to be called later. */
+void silc_schedule_internal_signal_call(void *context, SilcUInt32 signal);
 
+/* Call all signals */
+void silc_schedule_internal_signals_call(void *context,
+                                        SilcSchedule schedule);
+
+/* Block registered signals in scheduler. */
+void silc_schedule_internal_signals_block(void *context);
+
+/* Unblock registered signals in scheduler. */
+void silc_schedule_internal_signals_unblock(void *context);
 
 /* Internal task management routines. */
 
+static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
+                                          bool dispatch_all);
 static void silc_task_queue_alloc(SilcTaskQueue *queue);
 static void silc_task_queue_free(SilcTaskQueue queue);
-static SilcTask silc_task_find(SilcTaskQueue queue, uint32 fd);
+static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd);
 static SilcTask silc_task_add(SilcTaskQueue queue, SilcTask newtask, 
                              SilcTaskPriority priority);
 static SilcTask silc_task_get_first(SilcTaskQueue queue, SilcTask first);
 static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
                                      SilcTaskPriority priority);
 static int silc_schedule_task_remove(SilcTaskQueue queue, SilcTask task);
-static int silc_schedule_task_timeout_compare(struct timeval *smaller, 
-                                             struct timeval *bigger);
 static void silc_task_del_by_context(SilcTaskQueue queue, void *context);
 static void silc_task_del_by_callback(SilcTaskQueue queue,
                                      SilcTaskCallback callback);
-static void silc_task_del_by_fd(SilcTaskQueue queue, uint32 fd);
+static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd);
 
 /* Returns the task queue by task type */
-#define SILC_SCHEDULE_GET_QUEUE(type)                                  \
-  (type == SILC_TASK_FD ? schedule->fd_queue :                         \
-   type == SILC_TASK_TIMEOUT ? schedule->timeout_queue :               \
+#define SILC_SCHEDULE_GET_QUEUE(type)                          \
+  (type == SILC_TASK_FD ? schedule->fd_queue :                 \
+   type == SILC_TASK_TIMEOUT ? schedule->timeout_queue :       \
    schedule->generic_queue)
 
+/* Locks. These also block the signals that we care about and thus guarantee
+   that while we are in the scheduler no signals can happen.  This way we can
+   synchronise signals with the SILC Scheduler. */
+#define SILC_SCHEDULE_LOCK(schedule)                           \
+do {                                                           \
+  silc_schedule_internal_signals_block(schedule->internal);    \
+  silc_mutex_lock(schedule->lock);                             \
+} while (0)
+#define SILC_SCHEDULE_UNLOCK(schedule)                         \
+do {                                                           \
+  silc_mutex_unlock(schedule->lock);                           \
+  silc_schedule_internal_signals_unblock(schedule->internal);  \
+} while (0)
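
The silc_schedule_internal_* routines declared above are implemented per platform under unix/, win32/ and so on. As a rough sketch of what the signal blocking pair used by these lock macros might look like on a POSIX system (the SilcUnixScheduler layout here is an assumption for illustration, not the actual unix/ implementation):

#include <signal.h>

typedef struct {
  sigset_t signals;          /* signals registered through the scheduler */
  sigset_t signals_blocked;  /* mask saved by block, restored by unblock */
} SilcUnixScheduler;

void silc_schedule_internal_signals_block(void *context)
{
  SilcUnixScheduler *internal = context;
  sigprocmask(SIG_BLOCK, &internal->signals, &internal->signals_blocked);
}

void silc_schedule_internal_signals_unblock(void *context)
{
  SilcUnixScheduler *internal = context;
  sigprocmask(SIG_SETMASK, &internal->signals_blocked, NULL);
}

Blocking the signals before taking the mutex means a registered signal can never fire while the lock is held, which is what lets the scheduler deliver signal callbacks synchronously on the next round.
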
+
 /* SILC Task object. Represents one task in the scheduler. */
 struct SilcTaskStruct {
-  uint32 fd;
-  struct timeval timeout;
-  SilcTaskCallback callback;
-  void *context;
-  bool valid;
-  SilcTaskPriority priority;
-  SilcTaskType type;
+  SilcUInt32 fd;
+  SilcTaskCallback callback;      /* Task callback */
+  void *context;                  /* Task callback context */
+  struct timeval timeout;         /* Set for timeout tasks */
+  unsigned int valid : 1;         /* Set when task is valid */
+  unsigned int priority : 2;      /* Priority of the task */
+  unsigned int type : 5;           /* Type of the task */
 
   /* Pointers forming doubly linked circular list */
   struct SilcTaskStruct *next;
@@ -131,8 +167,8 @@ struct SilcTaskQueueStruct {
        List of file descriptors the scheduler is supposed to be listening.
        This is updated internally.
 
-   uint32 max_fd
-   uint32 last_fd
+   SilcUInt32 max_fd
+   SilcUInt32 last_fd
 
        Size of the fd_list list. There can be `max_fd' many tasks in
        the scheduler at once. The `last_fd' is the last valid entry
@@ -156,37 +192,43 @@ struct SilcTaskQueueStruct {
        File descriptor sets for select(). These are automatically managed
        by the scheduler and should not be touched otherwise.
 
-   void *wakeup
+   void *internal
 
-       System specific wakeup context. On multi-threaded environments the
-       scheduler needs to be wakenup (in the thread) when tasks are added
-       or removed. This is initialized by silc_schedule_wakeup_init.
+       System specific scheduler context.
 
    SILC_MUTEX_DEFINE(lock)
   
        Scheduler lock.
 
+   bool signal_tasks
+
+       TRUE when tasks have been registered from signals.  The next round in
+       the scheduler will call the callbacks when this is TRUE.
+
 */
 struct SilcScheduleStruct {
+  void *app_context;           /* Application specific context */
   SilcTaskQueue fd_queue;
   SilcTaskQueue timeout_queue;
   SilcTaskQueue generic_queue;
   SilcScheduleFd fd_list;
-  uint32 max_fd;
-  uint32 last_fd;
+  SilcUInt32 max_fd;
+  SilcUInt32 last_fd;
   struct timeval *timeout;
   bool valid;
-  void *wakeup;
+  void *internal;
   SILC_MUTEX_DEFINE(lock);
   bool is_locked;
+  bool signal_tasks;
 };
 
 /* Initializes the scheduler. This returns the scheduler context that
    is usually given as an argument to all silc_schedule_* functions.
    The `max_tasks' indicates the maximum number of tasks that the
-   scheduler can handle. */
+   scheduler can handle. The `app_context' is an application specific
+   context that is delivered to task callbacks. */
 
-SilcSchedule silc_schedule_init(int max_tasks)
+SilcSchedule silc_schedule_init(int max_tasks, void *app_context)
 {
   SilcSchedule schedule;
 
@@ -200,17 +242,21 @@ SilcSchedule silc_schedule_init(int max_tasks)
   silc_task_queue_alloc(&schedule->timeout_queue);
   silc_task_queue_alloc(&schedule->generic_queue);
 
+  if (!max_tasks)
+    max_tasks = 200;
+
   /* Initialize the scheduler */
   schedule->fd_list = silc_calloc(max_tasks, sizeof(*schedule->fd_list));
   schedule->max_fd = max_tasks;
   schedule->timeout = NULL;
   schedule->valid = TRUE;
+  schedule->app_context = app_context;
 
   /* Allocate scheduler lock */
   silc_mutex_alloc(&schedule->lock);
 
-  /* Initialize the wakeup, for multi-threads support */
-  schedule->wakeup = silc_schedule_wakeup_init(schedule);
+  /* Initialize the platform specific scheduler. */
+  schedule->internal = silc_schedule_internal_init(schedule, app_context);
 
   return schedule;
 }
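
The new `app_context' argument is stored in the scheduler and handed to every task callback (see the dispatch calls further below). A minimal usage sketch, assuming the SilcTaskCallback prototype matches the five arguments the dispatcher passes (schedule, app_context, event, fd, context); MyApp and the callback name are hypothetical:

#include "silcincludes.h"   /* the usual SILC umbrella header */

typedef struct { int counter; } MyApp;

static void my_timeout_cb(SilcSchedule schedule, void *app_context,
                          SilcTaskEvent type, SilcUInt32 fd, void *context)
{
  MyApp *app = app_context;   /* same pointer given to silc_schedule_init */
  app->counter++;
  silc_schedule_stop(schedule);
}

int main(void)
{
  MyApp app = { 0 };
  SilcSchedule schedule;

  /* 0 means "use the default task limit" (200, as initialized above) */
  schedule = silc_schedule_init(0, &app);

  /* Run my_timeout_cb once after 5 seconds */
  silc_schedule_task_add(schedule, 0, my_timeout_cb, NULL, 5, 0,
                         SILC_TASK_TIMEOUT, SILC_TASK_PRI_NORMAL);

  silc_schedule(schedule);            /* returns after silc_schedule_stop */
  silc_schedule_uninit(schedule);
  return 0;
}
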
@@ -227,6 +273,19 @@ bool silc_schedule_uninit(SilcSchedule schedule)
   if (schedule->valid == TRUE)
     return FALSE;
 
+  /* Dispatch all timeouts before going away */
+  SILC_SCHEDULE_LOCK(schedule);
+  silc_mutex_lock(schedule->timeout_queue->lock);
+  silc_schedule_dispatch_timeout(schedule, TRUE);
+  silc_mutex_unlock(schedule->timeout_queue->lock);
+  SILC_SCHEDULE_UNLOCK(schedule);
+
+  /* Deliver signals before going away */
+  if (schedule->signal_tasks) {
+    silc_schedule_internal_signals_call(schedule->internal, schedule);
+    schedule->signal_tasks = FALSE;
+  }
+
   /* Unregister all tasks */
   silc_schedule_task_remove(schedule->fd_queue, SILC_ALL_TASKS);
   silc_schedule_task_remove(schedule->timeout_queue, SILC_ALL_TASKS);
@@ -239,14 +298,29 @@ bool silc_schedule_uninit(SilcSchedule schedule)
 
   silc_free(schedule->fd_list);
 
-  /* Uninit the wakeup */
-  silc_schedule_wakeup_uninit(schedule->wakeup);
+  /* Uninit the platform specific scheduler. */
+  silc_schedule_internal_uninit(schedule->internal);
 
   silc_mutex_free(schedule->lock);
+  silc_free(schedule);
 
   return TRUE;
 }
 
+/* Enlarges the capacity of the scheduler to handle up to `max_tasks' tasks. */
+
+bool silc_schedule_reinit(SilcSchedule schedule, int max_tasks)
+{
+  SILC_SCHEDULE_LOCK(schedule);
+  if (schedule->max_fd <= max_tasks)
+    return FALSE;
+  schedule->fd_list = silc_realloc(schedule->fd_list, 
+                                  (sizeof(*schedule->fd_list) * max_tasks));
+  schedule->max_fd = max_tasks;
+  SILC_SCHEDULE_UNLOCK(schedule);
+  return TRUE;
+}
+
 /* Stops the schedule even if it is not supposed to be stopped yet. 
    After calling this, one should call silc_schedule_uninit (after the 
    silc_schedule has returned). */
@@ -254,9 +328,9 @@ bool silc_schedule_uninit(SilcSchedule schedule)
 void silc_schedule_stop(SilcSchedule schedule)
 {
   SILC_LOG_DEBUG(("Stopping scheduler"));
-  silc_mutex_lock(schedule->lock);
+  SILC_SCHEDULE_LOCK(schedule);
   schedule->valid = FALSE;
-  silc_mutex_unlock(schedule->lock);
+  SILC_SCHEDULE_UNLOCK(schedule);
 }
 
 /* Executes non-timeout tasks. It then checks whether any of the fd tasks
@@ -268,16 +342,18 @@ void silc_schedule_stop(SilcSchedule schedule)
 static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
 {
   SilcTask task;
-  int i, last_fd = schedule->last_fd;
+  int i;
+  SilcUInt32 fd, last_fd = schedule->last_fd;
 
   for (i = 0; i <= last_fd; i++) {
     if (schedule->fd_list[i].events == 0)
       continue;
 
+    fd = schedule->fd_list[i].fd;
+
     /* First check whether this fd has task in the fd queue */
     silc_mutex_lock(schedule->fd_queue->lock);
-    task = silc_task_find(schedule->fd_queue, schedule->fd_list[i].fd);
-    silc_mutex_unlock(schedule->fd_queue->lock);
+    task = silc_task_find(schedule->fd_queue, fd);
 
     /* If the task was found then execute its callbacks. If not then
        execute all generic tasks for that fd. */
@@ -285,23 +361,24 @@ static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
       /* Validity of the task is always checked before and after
          execution because the task might have been unregistered
          in the callback function, i.e. it is not valid anymore. */
-      silc_mutex_lock(schedule->fd_queue->lock);
 
       /* Is the task ready for reading */
       if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
        silc_mutex_unlock(schedule->fd_queue->lock);
-       silc_mutex_unlock(schedule->lock);
-       task->callback(schedule, SILC_TASK_READ, task->fd, task->context);
-       silc_mutex_lock(schedule->lock);
+       SILC_SCHEDULE_UNLOCK(schedule);
+       task->callback(schedule, schedule->app_context,
+                      SILC_TASK_READ, task->fd, task->context);
+       SILC_SCHEDULE_LOCK(schedule);
        silc_mutex_lock(schedule->fd_queue->lock);
       }
 
       /* Is the task ready for writing */
       if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
        silc_mutex_unlock(schedule->fd_queue->lock);
-       silc_mutex_unlock(schedule->lock);
-       task->callback(schedule, SILC_TASK_WRITE, task->fd, task->context);
-       silc_mutex_lock(schedule->lock);
+       SILC_SCHEDULE_UNLOCK(schedule);
+       task->callback(schedule, schedule->app_context,
+                      SILC_TASK_WRITE, task->fd, task->context);
+       SILC_SCHEDULE_LOCK(schedule);
        silc_mutex_lock(schedule->fd_queue->lock);
       }
 
@@ -312,6 +389,8 @@ static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
     } else {
       /* Run generic tasks for this fd. */
 
+      silc_mutex_unlock(schedule->fd_queue->lock);
+
       silc_mutex_lock(schedule->generic_queue->lock);
       if (!schedule->generic_queue->task) {
        silc_mutex_unlock(schedule->generic_queue->lock);
@@ -327,20 +406,20 @@ static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
        /* Is the task ready for reading */                             
        if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
          silc_mutex_unlock(schedule->generic_queue->lock);
-         silc_mutex_unlock(schedule->lock);
-         task->callback(schedule, SILC_TASK_READ, schedule->fd_list[i].fd, 
-                        task->context);
-         silc_mutex_lock(schedule->lock);
+         SILC_SCHEDULE_UNLOCK(schedule);
+         task->callback(schedule, schedule->app_context,
+                        SILC_TASK_READ, fd, task->context);
+         SILC_SCHEDULE_LOCK(schedule);
          silc_mutex_lock(schedule->generic_queue->lock);
        }
 
        /* Is the task ready for writing */                             
        if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
          silc_mutex_unlock(schedule->generic_queue->lock);
-         silc_mutex_unlock(schedule->lock);
-         task->callback(schedule, SILC_TASK_WRITE, schedule->fd_list[i].fd, 
-                        task->context);
-         silc_mutex_lock(schedule->lock);
+         SILC_SCHEDULE_UNLOCK(schedule);
+         task->callback(schedule, schedule->app_context,
+                        SILC_TASK_WRITE, fd, task->context);
+         SILC_SCHEDULE_LOCK(schedule);
          silc_mutex_lock(schedule->generic_queue->lock);
        }
 
@@ -377,7 +456,8 @@ static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
    phase. */
 /* This holds the schedule->lock and the schedule->timeout_queue->lock */
 
-static void silc_schedule_dispatch_timeout(SilcSchedule schedule)
+static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
+                                          bool dispatch_all)
 {
   SilcTaskQueue queue = schedule->timeout_queue;
   SilcTask task;
@@ -395,12 +475,14 @@ static void silc_schedule_dispatch_timeout(SilcSchedule schedule)
        the expired tasks. */
     while(1) {
       /* Execute the task if the timeout has expired */
-      if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
+      if (dispatch_all ||
+         silc_compare_timeval(&task->timeout, &curtime)) {
         if (task->valid) {
          silc_mutex_unlock(queue->lock);
-         silc_mutex_unlock(schedule->lock);
-         task->callback(schedule, SILC_TASK_EXPIRE, task->fd, task->context);
-         silc_mutex_lock(schedule->lock);
+         SILC_SCHEDULE_UNLOCK(schedule);
+         task->callback(schedule, schedule->app_context,
+                        SILC_TASK_EXPIRE, task->fd, task->context);
+         SILC_SCHEDULE_LOCK(schedule);
          silc_mutex_lock(queue->lock);
        }
 
@@ -448,9 +530,9 @@ static void silc_schedule_select_timeout(SilcSchedule schedule)
     if (task && task->valid == TRUE) {
       /* If the timeout is in the past, we will run the task and all other
         timeout tasks from the past. */
-      if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
-       silc_schedule_dispatch_timeout(schedule);
-                                               
+      if (silc_compare_timeval(&task->timeout, &curtime)) {
+       silc_schedule_dispatch_timeout(schedule, FALSE);
+
        /* The task(s) has expired and doesn't exist on the task queue
           anymore. We continue with the new timeout. */
        queue = schedule->timeout_queue;
@@ -502,7 +584,15 @@ bool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
   SILC_LOG_DEBUG(("In scheduler loop"));
 
   if (!schedule->is_locked)
-    silc_mutex_lock(schedule->lock);
+    SILC_SCHEDULE_LOCK(schedule);
+
+  /* Deliver signals if any have been set to be called */
+  if (schedule->signal_tasks) {
+    SILC_SCHEDULE_UNLOCK(schedule);
+    silc_schedule_internal_signals_call(schedule->internal, schedule);
+    schedule->signal_tasks = FALSE;
+    SILC_SCHEDULE_LOCK(schedule);
+  }
 
   /* If the task queues aren't initialized or we aren't valid anymore
      we will return */
@@ -510,7 +600,7 @@ bool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
        && !schedule->generic_queue) || schedule->valid == FALSE) {
     SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
     if (!schedule->is_locked)
-      silc_mutex_unlock(schedule->lock);
+      SILC_SCHEDULE_UNLOCK(schedule);
     return FALSE;
   }
 
@@ -526,7 +616,7 @@ bool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
     schedule->timeout = &timeout;
   }
 
-  silc_mutex_unlock(schedule->lock);
+  SILC_SCHEDULE_UNLOCK(schedule);
 
   /* This is the main select(). The program blocks here until some
      of the selected file descriptors change status or the selected
@@ -535,7 +625,7 @@ bool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
   ret = silc_select(schedule->fd_list, schedule->last_fd + 1, 
                    schedule->timeout);
 
-  silc_mutex_lock(schedule->lock);
+  SILC_SCHEDULE_LOCK(schedule);
 
   switch (ret) {
   case -1:
@@ -547,7 +637,7 @@ bool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
   case 0:
     /* Timeout */
     silc_mutex_lock(schedule->timeout_queue->lock);
-    silc_schedule_dispatch_timeout(schedule);
+    silc_schedule_dispatch_timeout(schedule, FALSE);
     silc_mutex_unlock(schedule->timeout_queue->lock);
     break;
   default:
@@ -558,7 +648,7 @@ bool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
   }
 
   if (!schedule->is_locked)
-    silc_mutex_unlock(schedule->lock);
+    SILC_SCHEDULE_UNLOCK(schedule);
 
   return TRUE;
 }
@@ -576,14 +666,14 @@ void silc_schedule(SilcSchedule schedule)
     return;
   }
 
-  silc_mutex_lock(schedule->lock);
+  SILC_SCHEDULE_LOCK(schedule);
   schedule->is_locked = TRUE;
 
   /* Start the scheduler loop */
   while (silc_schedule_one(schedule, -1)) 
     ;
 
-  silc_mutex_unlock(schedule->lock);
+  SILC_SCHEDULE_UNLOCK(schedule);
 }
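
Because silc_schedule_one blocks in silc_select above, a second thread that registers a task must also wake the scheduler up, or the new task would sit unnoticed until the current select() call returns on its own. A sketch of that pattern (deliver_result and its result pointer are hypothetical application code):

/* Called from a worker thread: hand `result' to the scheduler thread as a
   near-zero timeout task, then break it out of the blocking select(). */
static void hand_over_result(SilcSchedule schedule, void *result)
{
  silc_schedule_task_add(schedule, 0, deliver_result, result,
                         0, 1, SILC_TASK_TIMEOUT, SILC_TASK_PRI_NORMAL);
  silc_schedule_wakeup(schedule);
}
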
 
 /* Wakes up the scheduler. This is used only in multi-threaded
@@ -597,15 +687,25 @@ void silc_schedule_wakeup(SilcSchedule schedule)
 {
 #ifdef SILC_THREADS
   SILC_LOG_DEBUG(("Wakeup scheduler"));
-  silc_mutex_lock(schedule->lock);
-  silc_schedule_wakeup_internal(schedule->wakeup);
-  silc_mutex_unlock(schedule->lock);
+  SILC_SCHEDULE_LOCK(schedule);
+  silc_schedule_internal_wakeup(schedule->internal);
+  SILC_SCHEDULE_UNLOCK(schedule);
 #endif
 }
 
+/* Returns the application specific context that was saved into the
+   scheduler in the silc_schedule_init function.  The context is also
+   returned to the application in task callback functions, but this function
+   may be used to get it as well if needed. */
+
+void *silc_schedule_get_context(SilcSchedule schedule)
+{
+  return schedule->app_context;
+}
+
 /* Add new task to the scheduler */
 
-SilcTask silc_schedule_task_add(SilcSchedule schedule, uint32 fd,
+SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
                                SilcTaskCallback callback, void *context, 
                                long seconds, long useconds, 
                                SilcTaskType type, 
@@ -615,6 +715,9 @@ SilcTask silc_schedule_task_add(SilcSchedule schedule, uint32 fd,
   SilcTaskQueue queue;
   int timeout = FALSE;
 
+  if (!schedule->valid)
+    return NULL;
+
   SILC_LOG_DEBUG(("Registering new task, fd=%d type=%d priority=%d", fd, 
                  type, priority));
 
@@ -636,7 +739,7 @@ SilcTask silc_schedule_task_add(SilcSchedule schedule, uint32 fd,
 
          /* Add the fd to be listened to; the task found now applies to this
             fd as well. */
-         silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ);
+         silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ, FALSE);
          return task;
        }
        
@@ -675,7 +778,7 @@ SilcTask silc_schedule_task_add(SilcSchedule schedule, uint32 fd,
   /* If the task is a non-timeout task we have to tell the scheduler that we
      would like to have these tasks scheduled at some odd distant future. */
   if (type != SILC_TASK_TIMEOUT)
-    silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ);
+    silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ, FALSE);
 
   silc_mutex_lock(queue->lock);
 
@@ -741,8 +844,10 @@ void silc_schedule_task_del(SilcSchedule schedule, SilcTask task)
 
 /* Remove task by fd */
 
-void silc_schedule_task_del_by_fd(SilcSchedule schedule, uint32 fd)
+void silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
 {
+  SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
+
   silc_task_del_by_fd(schedule->timeout_queue, fd);
   silc_task_del_by_fd(schedule->fd_queue, fd);
 }
@@ -752,6 +857,8 @@ void silc_schedule_task_del_by_fd(SilcSchedule schedule, uint32 fd)
 void silc_schedule_task_del_by_callback(SilcSchedule schedule,
                                        SilcTaskCallback callback)
 {
+  SILC_LOG_DEBUG(("Unregister task by callback"));
+
   silc_task_del_by_callback(schedule->timeout_queue, callback);
   silc_task_del_by_callback(schedule->fd_queue, callback);
   silc_task_del_by_callback(schedule->generic_queue, callback);
@@ -761,6 +868,8 @@ void silc_schedule_task_del_by_callback(SilcSchedule schedule,
 
 void silc_schedule_task_del_by_context(SilcSchedule schedule, void *context)
 {
+  SILC_LOG_DEBUG(("Unregister task by context"));
+
   silc_task_del_by_context(schedule->timeout_queue, context);
   silc_task_del_by_context(schedule->fd_queue, context);
   silc_task_del_by_context(schedule->generic_queue, context);
@@ -770,21 +879,28 @@ void silc_schedule_task_del_by_context(SilcSchedule schedule, void *context)
    call this directly if wanted. This can be called multiple times for
    one file descriptor to set different iomasks. */
 
-void silc_schedule_set_listen_fd(SilcSchedule schedule,
-                                uint32 fd, SilcTaskEvent iomask)
+void silc_schedule_set_listen_fd(SilcSchedule schedule, SilcUInt32 fd,
+                                SilcTaskEvent mask, bool send_events)
 {
   int i;
   bool found = FALSE;
 
-  silc_mutex_lock(schedule->lock);
+  if (!schedule->valid)
+    return;
+
+  SILC_SCHEDULE_LOCK(schedule);
 
   for (i = 0; i < schedule->max_fd; i++)
     if (schedule->fd_list[i].fd == fd) {
       schedule->fd_list[i].fd = fd;
-      schedule->fd_list[i].events = iomask;
+      schedule->fd_list[i].events = mask;
       if (i > schedule->last_fd)
        schedule->last_fd = i;
       found = TRUE;
+      if (send_events) {
+       schedule->fd_list[i].revents = mask;
+       silc_schedule_dispatch_nontimeout(schedule);
+      }
       break;
     }
 
@@ -792,22 +908,28 @@ void silc_schedule_set_listen_fd(SilcSchedule schedule,
     for (i = 0; i < schedule->max_fd; i++)
       if (schedule->fd_list[i].events == 0) {
        schedule->fd_list[i].fd = fd;
-       schedule->fd_list[i].events = iomask;
+       schedule->fd_list[i].events = mask;
        if (i > schedule->last_fd)
          schedule->last_fd = i;
+       if (send_events) {
+         schedule->fd_list[i].revents = mask;
+         silc_schedule_dispatch_nontimeout(schedule);
+       }
        break;
       }
 
-  silc_mutex_unlock(schedule->lock);
+  SILC_SCHEDULE_UNLOCK(schedule);
 }
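
The new `send_events' argument lets the caller force the given events to be dispatched immediately instead of waiting for the next select() round. For example (conn->sock is a hypothetical connection fd):

/* Watch the connection's fd for writing and have the write event
   delivered immediately (send_events == TRUE) instead of waiting for
   the next select() round, e.g. to flush already queued output. */
silc_schedule_set_listen_fd(schedule, conn->sock, SILC_TASK_WRITE, TRUE);

/* Go back to watching for incoming data only. */
silc_schedule_set_listen_fd(schedule, conn->sock, SILC_TASK_READ, FALSE);
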
 
 /* Removes a file descriptor from listen list. */
 
-void silc_schedule_unset_listen_fd(SilcSchedule schedule, uint32 fd)
+void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
 {
   int i;
 
-  silc_mutex_lock(schedule->lock);
+  SILC_SCHEDULE_LOCK(schedule);
+
+  SILC_LOG_DEBUG(("Unset listen fd %d", fd));
 
   for (i = 0; i < schedule->max_fd; i++)
     if (schedule->fd_list[i].fd == fd) {
@@ -818,7 +940,34 @@ void silc_schedule_unset_listen_fd(SilcSchedule schedule, uint32 fd)
       break;
     }
 
-  silc_mutex_unlock(schedule->lock);
+  SILC_SCHEDULE_UNLOCK(schedule);
+}
+
+/* Register a new signal */
+
+void silc_schedule_signal_register(SilcSchedule schedule, SilcUInt32 signal,
+                                  SilcTaskCallback callback, void *context)
+{
+  silc_schedule_internal_signal_register(schedule->internal, signal,
+                                        callback, context);
+}
+
+/* Unregister a signal */
+
+void silc_schedule_signal_unregister(SilcSchedule schedule, SilcUInt32 signal,
+                                    SilcTaskCallback callback, void *context)
+{
+  silc_schedule_internal_signal_unregister(schedule->internal, signal,
+                                          callback, context);
+}
+
+/* Call the signal indicated by `signal'. */
+
+void silc_schedule_signal_call(SilcSchedule schedule, SilcUInt32 signal)
+{
+  /* Mark that signals need to be delivered later. */
+  silc_schedule_internal_signal_call(schedule->internal, signal);
+  schedule->signal_tasks = TRUE;
 }
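
These three functions route Unix signals through the scheduler: the application registers a task callback for a signal number, the real signal handler only calls silc_schedule_signal_call to mark it, and the callback then runs from the scheduler loop on the next round, outside signal handler context. A sketch, with the global schedule pointer and handler names as hypothetical application code (callback prototype as assumed in the earlier example):

#include <signal.h>

static SilcSchedule global_schedule;    /* set by the application at startup */

static void sigint_task(SilcSchedule schedule, void *app_context,
                        SilcTaskEvent type, SilcUInt32 fd, void *context)
{
  silc_schedule_stop(schedule);         /* shut down cleanly on SIGINT */
}

static void sigint_trampoline(int sig)
{
  /* Async-signal context: only mark the signal for later delivery. */
  silc_schedule_signal_call(global_schedule, SIGINT);
}

static void setup_signals(SilcSchedule schedule)
{
  global_schedule = schedule;
  silc_schedule_signal_register(schedule, SIGINT, sigint_task, NULL);
  signal(SIGINT, sigint_trampoline);
}
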
 
 /* Allocates a new task queue into the scheduler */
@@ -834,12 +983,13 @@ static void silc_task_queue_alloc(SilcTaskQueue *queue)
 static void silc_task_queue_free(SilcTaskQueue queue)
 {
   silc_mutex_free(queue->lock);
+  memset(queue, 'F', sizeof(*queue));
   silc_free(queue);
 }
 
 /* Return task by its fd. */
 
-static SilcTask silc_task_find(SilcTaskQueue queue, uint32 fd)
+static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd)
 {
   SilcTask next;
 
@@ -933,7 +1083,7 @@ static SilcTask silc_task_get_first(SilcTaskQueue queue, SilcTask first)
     if (first == prev)
       break;
 
-    if (silc_schedule_task_timeout_compare(&prev->timeout, &task->timeout))
+    if (silc_compare_timeval(&prev->timeout, &task->timeout))
       task = prev;
 
     prev = prev->prev;
@@ -966,13 +1116,11 @@ static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
 
       /* If we have a longer timeout than the task ahead of us
         we have found our spot. */
-      if (silc_schedule_task_timeout_compare(&prev->timeout, 
-                                            &newtask->timeout))
+      if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
        break;
 
       /* If the timeouts are equal we will be after it. */
-      if (!silc_schedule_task_timeout_compare(&newtask->timeout, 
-                                             &prev->timeout))
+      if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
        break;
 
       /* We have shorter timeout, compare to next one. */
@@ -987,11 +1135,9 @@ static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
     
     if (prev == task) {
       /* Check if we are going to be the first task in the queue */
-      if (silc_schedule_task_timeout_compare(&prev->timeout, 
-                                            &newtask->timeout))
+      if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
        break;
-      if (!silc_schedule_task_timeout_compare(&newtask->timeout, 
-                                             &prev->timeout))
+      if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
        break;
 
       /* We are now the first task in queue */
@@ -1005,13 +1151,11 @@ static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
 
       /* If we have a longer timeout than the task ahead of us
         we have found our spot. */
-      if (silc_schedule_task_timeout_compare(&prev->timeout, 
-                                            &newtask->timeout))
+      if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
        break;
 
       /* If the timeouts are equal, priority decides the place. */
-      if (!silc_schedule_task_timeout_compare(&newtask->timeout, 
-                                             &prev->timeout))
+      if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
        if (prev->priority >= SILC_TASK_PRI_NORMAL)
          break;
 
@@ -1027,11 +1171,9 @@ static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
     
     if (prev == task) {
       /* Check if we are going to be the first task in the queue */
-      if (silc_schedule_task_timeout_compare(&prev->timeout, 
-                                            &newtask->timeout))
+      if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
        break;
-      if (!silc_schedule_task_timeout_compare(&newtask->timeout, 
-                                             &prev->timeout))
+      if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
        if (prev->priority >= SILC_TASK_PRI_NORMAL)
          break;
 
@@ -1070,10 +1212,11 @@ static int silc_schedule_task_remove(SilcTaskQueue queue, SilcTask task)
     next = first;
 
     while(1) {
-      next = next->next;
-      silc_free(next->prev);
-      if (next == first)
+      old = next->next;
+      silc_free(next);
+      if (old == first)
        break;
+      next = old;
     }
 
     queue->task = NULL;
@@ -1109,26 +1252,10 @@ static int silc_schedule_task_remove(SilcTaskQueue queue, SilcTask task)
   }
 }
 
-/* Compare two time values. If the first argument is smaller than the
-   second this function returns TRUE. */
-
-static int silc_schedule_task_timeout_compare(struct timeval *smaller, 
-                                             struct timeval *bigger)
-{
-  if ((smaller->tv_sec < bigger->tv_sec) ||
-      ((smaller->tv_sec == bigger->tv_sec) &&
-       (smaller->tv_usec < bigger->tv_usec)))
-    return TRUE;
-
-  return FALSE;
-}
-
-static void silc_task_del_by_fd(SilcTaskQueue queue, uint32 fd)
+static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd)
 {
   SilcTask next;
 
-  SILC_LOG_DEBUG(("Unregister task by fd"));
-
   silc_mutex_lock(queue->lock);
 
   if (!queue->task) {
@@ -1154,8 +1281,6 @@ static void silc_task_del_by_callback(SilcTaskQueue queue,
 {
   SilcTask next;
 
-  SILC_LOG_DEBUG(("Unregister task by callback"));
-
   silc_mutex_lock(queue->lock);
 
   if (!queue->task) {
@@ -1180,8 +1305,6 @@ static void silc_task_del_by_context(SilcTaskQueue queue, void *context)
 {
   SilcTask next;
 
-  SILC_LOG_DEBUG(("Unregister task by context"));
-
   silc_mutex_lock(queue->lock);
 
   if (!queue->task) {