Added new SILC_STATUS_ERR_TIMEDOUT status type.
[silc.git] / lib / silcutil / silcschedule.c
index 0305893e27679a9a706d09820d9c616cdeed8b14..af37e9fc10729166dfd591d78ed0d5e3bdae2d50 100644 (file)
 /* Forward declarations */
 typedef struct SilcTaskQueueStruct *SilcTaskQueue;
 
-/* System specific routines. Implemented under unix/ and win32/. */
+/* System specific routines. Implemented under unix/, win32/ and such. */
 
 /* System specific select(). Returns same values as normal select(). */
 int silc_select(SilcScheduleFd fds, SilcUInt32 fds_count, 
                struct timeval *timeout);
 
-/* Initializes the wakeup of the scheduler. In multi-threaded environment
+/* Initializes the platform specific scheduler.  This, for example, initializes
+   the wakeup mechanism of the scheduler.  In a multi-threaded environment
    the scheduler needs to be woken up when tasks are added or removed from
-   the task queues. This will initialize the wakeup for the scheduler.
-   Any tasks that needs to be registered must be registered to the `queue'.
-   It is guaranteed that the scheduler will automatically free any
-   registered tasks in this queue. This is system specific routine. */
-void *silc_schedule_wakeup_init(SilcSchedule schedule);
+   the task queues.  Returns the platform specific scheduler context. */
+void *silc_schedule_internal_init(SilcSchedule schedule, void *context);
 
-/* Uninitializes the system specific wakeup. */
-void silc_schedule_wakeup_uninit(void *context);
+/* Uninitializes the platform specific scheduler context. */
+void silc_schedule_internal_uninit(void *context);
 
 /* Wakes up the scheduler. This is a platform specific routine. */
-void silc_schedule_wakeup_internal(void *context);
+void silc_schedule_internal_wakeup(void *context);
 
+/* Register signal */
+void silc_schedule_internal_signal_register(void *context,
+                                            SilcUInt32 signal,
+                                            SilcTaskCallback callback,
+                                            void *callback_context);
+
+/* Unregister signal */
+void silc_schedule_internal_signal_unregister(void *context,
+                                              SilcUInt32 signal,
+                                              SilcTaskCallback callback,
+                                              void *callback_context);
+
+/* Mark a signal to be called later. */
+void silc_schedule_internal_signal_call(void *context, SilcUInt32 signal);
+
+/* Call all signals */
+void silc_schedule_internal_signals_call(void *context,
+                                        SilcSchedule schedule);
+
+/* Block registered signals in the scheduler. */
+void silc_schedule_internal_signals_block(void *context);
+
+/* Unblock registered signals in the scheduler. */
+void silc_schedule_internal_signals_unblock(void *context);
 
 /* Internal task management routines. */
 
+static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
+                                          bool dispatch_all);
 static void silc_task_queue_alloc(SilcTaskQueue *queue);
 static void silc_task_queue_free(SilcTaskQueue queue);
 static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd);
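The silc_select() and silc_schedule_internal_* declarations above are the complete contract a platform port has to satisfy. As a rough orientation, a hypothetical no-op port could look like the sketch below; the prototypes are the ones declared above, while the header name and the empty bodies are assumptions of this sketch, not the actual unix/ or win32/ code.

/* Hypothetical no-op skeleton of a platform port; not real SILC code. */
#include "silcincludes.h"    /* assumed umbrella header for SilcSchedule etc. */

/* select() replacement; a real port polls the descriptors in `fds'. */
int silc_select(SilcScheduleFd fds, SilcUInt32 fds_count,
                struct timeval *timeout)
{
  return 0;                  /* 0 means timeout, as with select() */
}

/* Allocate and return the opaque per-platform state that the scheduler
   passes back to every other internal routine. */
void *silc_schedule_internal_init(SilcSchedule schedule, void *context)
{
  return NULL;
}

void silc_schedule_internal_uninit(void *context)
{
}

/* Interrupt a blocking silc_select(), e.g. by writing to a wakeup pipe. */
void silc_schedule_internal_wakeup(void *context)
{
}

/* Install an OS signal handler that will later mark the signal. */
void silc_schedule_internal_signal_register(void *context, SilcUInt32 signal,
                                            SilcTaskCallback callback,
                                            void *callback_context)
{
}

void silc_schedule_internal_signal_unregister(void *context, SilcUInt32 signal,
                                              SilcTaskCallback callback,
                                              void *callback_context)
{
}

/* Remember that `signal' fired; called from the OS signal handler. */
void silc_schedule_internal_signal_call(void *context, SilcUInt32 signal)
{
}

/* Run the callbacks of all signals marked above. */
void silc_schedule_internal_signals_call(void *context, SilcSchedule schedule)
{
}

/* Block and unblock the registered signals around critical sections. */
void silc_schedule_internal_signals_block(void *context)
{
}

void silc_schedule_internal_signals_unblock(void *context)
{
}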
@@ -65,24 +89,34 @@ static void silc_task_del_by_callback(SilcTaskQueue queue,
 static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd);
 
 /* Returns the task queue by task type */
-#define SILC_SCHEDULE_GET_QUEUE(type)                                  \
-  (type == SILC_TASK_FD ? schedule->fd_queue :                         \
-   type == SILC_TASK_TIMEOUT ? schedule->timeout_queue :               \
+#define SILC_SCHEDULE_GET_QUEUE(type)                          \
+  (type == SILC_TASK_FD ? schedule->fd_queue :                 \
+   type == SILC_TASK_TIMEOUT ? schedule->timeout_queue :       \
    schedule->generic_queue)
 
-/* Locks */
-#define SILC_SCHEDULE_LOCK(schedule) silc_mutex_lock(schedule->lock)
-#define SILC_SCHEDULE_UNLOCK(schedule) silc_mutex_unlock(schedule->lock)
+/* Locks. These also block signals that we care about and thus guarantee
+   that while we are in the scheduler no signals can be delivered.  This
+   way we can synchronise signals with the SILC Scheduler. */
+#define SILC_SCHEDULE_LOCK(schedule)                           \
+do {                                                           \
+  silc_schedule_internal_signals_block(schedule->internal);    \
+  silc_mutex_lock(schedule->lock);                             \
+} while (0)
+#define SILC_SCHEDULE_UNLOCK(schedule)                         \
+do {                                                           \
+  silc_mutex_unlock(schedule->lock);                           \
+  silc_schedule_internal_signals_unblock(schedule->internal);  \
+} while (0)
 
 /* SILC Task object. Represents one task in the scheduler. */
 struct SilcTaskStruct {
   SilcUInt32 fd;
-  struct timeval timeout;
-  SilcTaskCallback callback;
-  void *context;
-  bool valid;
-  SilcTaskPriority priority;
-  SilcTaskType type;
+  SilcTaskCallback callback;      /* Task callback */
+  void *context;                  /* Task callback context */
+  struct timeval timeout;         /* Set for timeout tasks */
+  unsigned int valid : 1;         /* Set when task is valid */
+  unsigned int priority : 2;      /* Priority of the task */
+  unsigned int type : 5;           /* Type of the task */
 
   /* Pointers forming doubly linked circular list */
   struct SilcTaskStruct *next;
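The reworked lock macros pair the mutex with signal blocking so that an asynchronous signal handler can never observe half-updated scheduler state. The snippet below is a generic POSIX illustration of that pattern, not the SILC unix/ implementation; the signal set, the names and the single global saved mask are placeholders.

#include <pthread.h>
#include <signal.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static sigset_t sched_saved_mask;      /* mask to restore on unlock */

static void sched_lock_acquire(void)
{
  sigset_t block;

  /* Block the signals we handle, then take the mutex; no handler can
     run while the scheduler data is being modified. */
  sigemptyset(&block);
  sigaddset(&block, SIGINT);           /* placeholder signal set */
  sigaddset(&block, SIGTERM);
  pthread_sigmask(SIG_BLOCK, &block, &sched_saved_mask);
  pthread_mutex_lock(&sched_lock);
}

static void sched_lock_release(void)
{
  /* Release in the opposite order: drop the mutex, then let the
     blocked signals through again.  A real implementation would keep
     the saved mask per scheduler or per thread. */
  pthread_mutex_unlock(&sched_lock);
  pthread_sigmask(SIG_SETMASK, &sched_saved_mask, NULL);
}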
@@ -161,18 +195,22 @@ struct SilcTaskQueueStruct {
        File descriptor sets for select(). These are automatically managed
        by the scheduler and should not be touched otherwise.
 
-   void *wakeup
+   void *internal
 
-       System specific wakeup context. On multi-threaded environments the
-       scheduler needs to be wakenup (in the thread) when tasks are added
-       or removed. This is initialized by silc_schedule_wakeup_init.
+       System specific scheduler context.
 
    SILC_MUTEX_DEFINE(lock)
   
        Scheduler lock.
 
+   bool signal_tasks
+
+       TRUE when tasks have been registered from signals.  The next round
+       in the scheduler will call the callbacks when this is TRUE.
+
 */
 struct SilcScheduleStruct {
+  void *app_context;           /* Application specific context */
   SilcTaskQueue fd_queue;
   SilcTaskQueue timeout_queue;
   SilcTaskQueue generic_queue;
@@ -181,17 +219,19 @@ struct SilcScheduleStruct {
   SilcUInt32 last_fd;
   struct timeval *timeout;
   bool valid;
-  void *wakeup;
+  void *internal;
   SILC_MUTEX_DEFINE(lock);
   bool is_locked;
+  bool signal_tasks;
 };
 
 /* Initializes the scheduler. This returns the scheduler context that
    is usually given as an argument to all silc_schedule_* functions.
    The `max_tasks' indicates the maximum number of tasks that the
-   scheduler can handle. */
+   scheduler can handle. The `app_context' is application specific
+   context that is delivered to task callbacks. */
 
-SilcSchedule silc_schedule_init(int max_tasks)
+SilcSchedule silc_schedule_init(int max_tasks, void *app_context)
 {
   SilcSchedule schedule;
 
@@ -213,12 +253,13 @@ SilcSchedule silc_schedule_init(int max_tasks)
   schedule->max_fd = max_tasks;
   schedule->timeout = NULL;
   schedule->valid = TRUE;
+  schedule->app_context = app_context;
 
   /* Allocate scheduler lock */
   silc_mutex_alloc(&schedule->lock);
 
-  /* Initialize the wakeup, for multi-threads support */
-  schedule->wakeup = silc_schedule_wakeup_init(schedule);
+  /* Initialize the platform specific scheduler. */
+  schedule->internal = silc_schedule_internal_init(schedule, app_context);
 
   return schedule;
 }
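With the extra argument, setting up a scheduler now looks roughly like the sketch below. MyApp is a hypothetical application context and 200 an arbitrary task limit; the point is that the same pointer later arrives in every task callback and can be fetched with silc_schedule_get_context().

#include "silcincludes.h"     /* assumed umbrella header */

typedef struct {
  int connections;            /* hypothetical application state */
} MyApp;

int main(void)
{
  MyApp app = { 0 };
  SilcSchedule schedule;

  /* 200 = maximum number of tasks; &app is the new application context. */
  schedule = silc_schedule_init(200, &app);

  /* ... register tasks ... */

  silc_schedule(schedule);    /* main run loop; returns after silc_schedule_stop() */
  silc_schedule_uninit(schedule);
  return 0;
}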
@@ -235,6 +276,19 @@ bool silc_schedule_uninit(SilcSchedule schedule)
   if (schedule->valid == TRUE)
     return FALSE;
 
+  /* Dispatch all timeouts before going away */
+  silc_mutex_lock(schedule->timeout_queue->lock);
+  silc_schedule_dispatch_timeout(schedule, TRUE);
+  silc_mutex_unlock(schedule->timeout_queue->lock);
+
+  /* Deliver signals before going away */
+  if (schedule->signal_tasks) {
+    SILC_SCHEDULE_UNLOCK(schedule);
+    silc_schedule_internal_signals_call(schedule->internal, schedule);
+    schedule->signal_tasks = FALSE;
+    SILC_SCHEDULE_LOCK(schedule);
+  }
+
   /* Unregister all tasks */
   silc_schedule_task_remove(schedule->fd_queue, SILC_ALL_TASKS);
   silc_schedule_task_remove(schedule->timeout_queue, SILC_ALL_TASKS);
@@ -247,10 +301,11 @@ bool silc_schedule_uninit(SilcSchedule schedule)
 
   silc_free(schedule->fd_list);
 
-  /* Uninit the wakeup */
-  silc_schedule_wakeup_uninit(schedule->wakeup);
+  /* Uninit the platform specific scheduler. */
+  silc_schedule_internal_uninit(schedule->internal);
 
   silc_mutex_free(schedule->lock);
+  silc_free(schedule);
 
   return TRUE;
 }
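Together the uninit changes settle the teardown order: stop the scheduler, let the run loop return, then call silc_schedule_uninit(), which now also dispatches the remaining timeout tasks, delivers pending signal callbacks and frees the scheduler context itself. A sketch of that order:

static void my_app_shutdown(SilcSchedule schedule)
{
  /* Mark the scheduler invalid; the running silc_schedule() loop
     notices this and returns. */
  silc_schedule_stop(schedule);

  /* ... after the run loop has returned ... */

  /* Dispatches leftover timeouts, delivers pending signal callbacks,
     removes all tasks and frees `schedule'.  Returns FALSE only if
     silc_schedule_stop() was not called first. */
  if (!silc_schedule_uninit(schedule))
    SILC_LOG_ERROR(("Scheduler still running, cannot uninitialize"));
}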
@@ -276,7 +331,9 @@ bool silc_schedule_reinit(SilcSchedule schedule, int max_tasks)
 void silc_schedule_stop(SilcSchedule schedule)
 {
   SILC_LOG_DEBUG(("Stopping scheduler"));
+  SILC_SCHEDULE_LOCK(schedule);
   schedule->valid = FALSE;
+  SILC_SCHEDULE_UNLOCK(schedule);
 }
 
 /* Executes nontimeout tasks. It then checks whether any of the fd tasks
@@ -312,7 +369,8 @@ static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
       if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
        silc_mutex_unlock(schedule->fd_queue->lock);
        SILC_SCHEDULE_UNLOCK(schedule);
-       task->callback(schedule, SILC_TASK_READ, task->fd, task->context);
+       task->callback(schedule, schedule->app_context,
+                      SILC_TASK_READ, task->fd, task->context);
        SILC_SCHEDULE_LOCK(schedule);
        silc_mutex_lock(schedule->fd_queue->lock);
       }
@@ -321,7 +379,8 @@ static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
       if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
        silc_mutex_unlock(schedule->fd_queue->lock);
        SILC_SCHEDULE_UNLOCK(schedule);
-       task->callback(schedule, SILC_TASK_WRITE, task->fd, task->context);
+       task->callback(schedule, schedule->app_context,
+                      SILC_TASK_WRITE, task->fd, task->context);
        SILC_SCHEDULE_LOCK(schedule);
        silc_mutex_lock(schedule->fd_queue->lock);
       }
@@ -351,7 +410,8 @@ static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
        if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
          silc_mutex_unlock(schedule->generic_queue->lock);
          SILC_SCHEDULE_UNLOCK(schedule);
-         task->callback(schedule, SILC_TASK_READ, fd, task->context);
+         task->callback(schedule, schedule->app_context,
+                        SILC_TASK_READ, fd, task->context);
          SILC_SCHEDULE_LOCK(schedule);
          silc_mutex_lock(schedule->generic_queue->lock);
        }
@@ -360,7 +420,8 @@ static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
        if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
          silc_mutex_unlock(schedule->generic_queue->lock);
          SILC_SCHEDULE_UNLOCK(schedule);
-         task->callback(schedule, SILC_TASK_WRITE, fd, task->context);
+         task->callback(schedule, schedule->app_context,
+                        SILC_TASK_WRITE, fd, task->context);
          SILC_SCHEDULE_LOCK(schedule);
          silc_mutex_lock(schedule->generic_queue->lock);
        }
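Every dispatch site above now passes the application context as the second callback argument, so task callbacks gain one parameter. The sketch below shows a callback and its registration; the event parameter's type name and the silc_schedule_task_add() arguments after `fd' are assumed from the public toolkit API rather than taken from this diff, and MyApp is the hypothetical context type from the earlier init sketch.

/* New argument order as used by the dispatcher:
   (schedule, app_context, event, fd, task context). */
static void my_read_cb(SilcSchedule schedule, void *app_context,
                       SilcTaskEvent type, SilcUInt32 fd, void *context)
{
  MyApp *app = app_context;   /* same pointer given to silc_schedule_init */

  if (type == SILC_TASK_READ) {
    /* ... read from `fd' ... */
    app->connections++;       /* hypothetical bookkeeping */
  }
}

static void register_reader(SilcSchedule schedule, SilcUInt32 sock)
{
  /* Assumed parameters after `sock': callback, context, seconds,
     useconds, task type, priority. */
  if (!silc_schedule_task_add(schedule, sock, my_read_cb, NULL, 0, 0,
                              SILC_TASK_FD, SILC_TASK_PRI_NORMAL)) {
    /* NULL is now returned if the scheduler has already been stopped. */
  }
}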
@@ -398,7 +459,8 @@ static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
    phase. */
 /* This holds the schedule->lock and the schedule->timeout_queue->lock */
 
-static void silc_schedule_dispatch_timeout(SilcSchedule schedule)
+static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
+                                          bool dispatch_all)
 {
   SilcTaskQueue queue = schedule->timeout_queue;
   SilcTask task;
@@ -416,11 +478,13 @@ static void silc_schedule_dispatch_timeout(SilcSchedule schedule)
        the expired tasks. */
     while(1) {
       /* Execute the task if the timeout has expired */
-      if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
+      if (dispatch_all ||
+         silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
         if (task->valid) {
          silc_mutex_unlock(queue->lock);
          SILC_SCHEDULE_UNLOCK(schedule);
-         task->callback(schedule, SILC_TASK_EXPIRE, task->fd, task->context);
+         task->callback(schedule, schedule->app_context,
+                        SILC_TASK_EXPIRE, task->fd, task->context);
          SILC_SCHEDULE_LOCK(schedule);
          silc_mutex_lock(queue->lock);
        }
@@ -470,8 +534,8 @@ static void silc_schedule_select_timeout(SilcSchedule schedule)
       /* If the timeout is in past, we will run the task and all other
         timeout tasks from the past. */
       if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
-       silc_schedule_dispatch_timeout(schedule);
-                                               
+       silc_schedule_dispatch_timeout(schedule, FALSE);
+
        /* The task(s) has expired and doesn't exist on the task queue
           anymore. We continue with new timeout. */
        queue = schedule->timeout_queue;
@@ -525,6 +589,14 @@ bool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
   if (!schedule->is_locked)
     SILC_SCHEDULE_LOCK(schedule);
 
+  /* Deliver signals if any have been set to be called */
+  if (schedule->signal_tasks) {
+    SILC_SCHEDULE_UNLOCK(schedule);
+    silc_schedule_internal_signals_call(schedule->internal, schedule);
+    schedule->signal_tasks = FALSE;
+    SILC_SCHEDULE_LOCK(schedule);
+  }
+
   /* If the task queues aren't initialized or we aren't valid anymore
      we will return */
   if ((!schedule->fd_queue && !schedule->timeout_queue 
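silc_schedule_one() now begins every round by delivering signals that were marked while the previous round ran. This is the usual technique for making signal handling synchronous: the asynchronous handler only records the signal, and the loop runs the real callback at a safe point. A generic, SILC-independent illustration (NSIG and the flag array are assumptions of the sketch):

#include <signal.h>

static volatile sig_atomic_t sig_pending[NSIG];

/* Asynchronous handler: only record the signal, do nothing else. */
static void mark_signal(int signo)
{
  sig_pending[signo] = 1;
}

/* Called at a safe point in the event loop, outside any locks. */
static void deliver_marked_signals(void)
{
  int i;

  for (i = 1; i < NSIG; i++) {
    if (sig_pending[i]) {
      sig_pending[i] = 0;
      /* ... run the task callback registered for signal i ... */
    }
  }
}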
@@ -568,7 +640,7 @@ bool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
   case 0:
     /* Timeout */
     silc_mutex_lock(schedule->timeout_queue->lock);
-    silc_schedule_dispatch_timeout(schedule);
+    silc_schedule_dispatch_timeout(schedule, FALSE);
     silc_mutex_unlock(schedule->timeout_queue->lock);
     break;
   default:
@@ -619,11 +691,21 @@ void silc_schedule_wakeup(SilcSchedule schedule)
 #ifdef SILC_THREADS
   SILC_LOG_DEBUG(("Wakeup scheduler"));
   SILC_SCHEDULE_LOCK(schedule);
-  silc_schedule_wakeup_internal(schedule->wakeup);
+  silc_schedule_internal_wakeup(schedule->internal);
   SILC_SCHEDULE_UNLOCK(schedule);
 #endif
 }
 
+/* Returns the application specific context that was saved into the
+   scheduler in the silc_schedule_init function.  The context is also
+   returned to the application in task callback functions, but this function
+   may be used to get it as well if needed. */
+
+void *silc_schedule_get_context(SilcSchedule schedule)
+{
+  return schedule->app_context;
+}
+
 /* Add new task to the scheduler */
 
 SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
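A short usage sketch for the new silc_schedule_get_context() accessor, again assuming the hypothetical MyApp context from the init example:

static int current_connections(SilcSchedule schedule)
{
  MyApp *app = silc_schedule_get_context(schedule);
  return app ? app->connections : 0;
}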
@@ -636,6 +718,9 @@ SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
   SilcTaskQueue queue;
   int timeout = FALSE;
 
+  if (!schedule->valid)
+    return NULL;
+
   SILC_LOG_DEBUG(("Registering new task, fd=%d type=%d priority=%d", fd, 
                  type, priority));
 
@@ -803,6 +888,9 @@ void silc_schedule_set_listen_fd(SilcSchedule schedule,
   int i;
   bool found = FALSE;
 
+  if (!schedule->valid)
+    return;
+
   SILC_SCHEDULE_LOCK(schedule);
 
   for (i = 0; i < schedule->max_fd; i++)
@@ -850,6 +938,33 @@ void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
   SILC_SCHEDULE_UNLOCK(schedule);
 }
 
+/* Register a new signal */
+
+void silc_schedule_signal_register(SilcSchedule schedule, SilcUInt32 signal,
+                                  SilcTaskCallback callback, void *context)
+{
+  silc_schedule_internal_signal_register(schedule->internal, signal,
+                                        callback, context);
+}
+
+/* Unregister a signal */
+
+void silc_schedule_signal_unregister(SilcSchedule schedule, SilcUInt32 signal,
+                                    SilcTaskCallback callback, void *context)
+{
+  silc_schedule_internal_signal_unregister(schedule->internal, signal,
+                                          callback, context);
+}
+
+/* Call signal indicated by `signal'. */
+
+void silc_schedule_signal_call(SilcSchedule schedule, SilcUInt32 signal)
+{
+  /* Mark that signals needs to be delivered later. */
+  silc_schedule_internal_signal_call(schedule->internal, signal);
+  schedule->signal_tasks = TRUE;
+}
+
 /* Allocates a new task queue into the scheduler */
 
 static void silc_task_queue_alloc(SilcTaskQueue *queue)
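The three silc_schedule_signal_* functions above form the public signal API. The sketch below wires SIGINT to a clean shutdown; it assumes the `signal' argument is a plain POSIX signal number and reuses the widened callback signature shown earlier.

#include <signal.h>

static void sigint_cb(SilcSchedule schedule, void *app_context,
                      SilcTaskEvent type, SilcUInt32 fd, void *context)
{
  /* Runs on a normal scheduler round, not inside the OS handler. */
  silc_schedule_stop(schedule);
}

static void setup_signals(SilcSchedule schedule)
{
  silc_schedule_signal_register(schedule, SIGINT, sigint_cb, NULL);

  /* The platform handler calls silc_schedule_signal_call(schedule, SIGINT)
     when the signal actually arrives; the callback is then delivered at
     the top of the next silc_schedule_one() round. */
}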
@@ -863,6 +978,7 @@ static void silc_task_queue_alloc(SilcTaskQueue *queue)
 static void silc_task_queue_free(SilcTaskQueue queue)
 {
   silc_mutex_free(queue->lock);
+  memset(queue, 'F', sizeof(*queue));
   silc_free(queue);
 }
 
@@ -1099,10 +1215,11 @@ static int silc_schedule_task_remove(SilcTaskQueue queue, SilcTask task)
     next = first;
 
     while(1) {
-      next = next->next;
-      silc_free(next->prev);
-      if (next == first)
+      old = next->next;
+      silc_free(next);
+      if (old == first)
        break;
+      next = old;
     }
 
     queue->task = NULL;
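The final hunk fixes a use-after-free in the queue teardown loop: the old code advanced first and then freed `next->prev', so on the last pass it read the prev link of the already freed head node. The corrected pattern, shown standalone below in plain C, copies the successor link while the node is still valid and afterwards compares pointers only:

#include <stdlib.h>

struct node {
  struct node *prev, *next;   /* circular doubly linked list */
};

static void free_ring(struct node *first)
{
  struct node *cur = first, *old;

  if (!cur)
    return;

  while (1) {
    old = cur->next;          /* read the link before freeing `cur' */
    free(cur);
    if (old == first)         /* pointer comparison only */
      break;
    cur = old;
  }
}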