Added asynchronous event tasks to SILC Scheduler.
[crypto.git] lib/silcutil/unix/silcunixschedule.c
index ccf410e9205fe4f07d38b5b1b00277629fc55a02..d81cfa751668172aa9d3789a7a51c26bac0a473a 100644
@@ -4,7 +4,7 @@
 
   Author: Pekka Riikonen <priikone@silcnet.org>
 
-  Copyright (C) 1998 - 2006 Pekka Riikonen
+  Copyright (C) 1998 - 2007 Pekka Riikonen
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
@@ -20,7 +20,9 @@
 
 #include "silc.h"
 
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+#include <sys/epoll.h>
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
 #include <poll.h>
 #endif
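
Note (not part of the patch): the configure-time preference is now epoll over poll over select. For readers new to epoll(7), a minimal standalone sketch of the level-triggered pattern this file adopts; all names here are illustrative:

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
  struct epoll_event ev, events[4];
  int n, epfd = epoll_create(4);         /* size is only a hint on modern kernels */
  if (epfd < 0)
    return 1;

  ev.events = EPOLLIN;                   /* level-triggered is the default mode */
  ev.data.ptr = NULL;                    /* user pointer handed back by epoll_wait() */
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev))
    return 1;

  n = epoll_wait(epfd, events, 4, 1000); /* timeout in milliseconds */
  printf("%d fd(s) ready\n", n);
  close(epfd);
  return 0;
}
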
 
@@ -28,7 +30,11 @@ const SilcScheduleOps schedule_ops;
 
 /* Internal context. */
 typedef struct {
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+  struct epoll_event *fds;
+  SilcUInt32 fds_count;
+  int epfd;
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
   struct rlimit nofile;
   struct pollfd *fds;
   SilcUInt32 fds_count;
@@ -41,16 +47,69 @@ typedef struct {
 } *SilcUnixScheduler;
 
 typedef struct {
-  SilcUInt32 signal;
+  SilcUInt32 sig;
   SilcTaskCallback callback;
   void *context;
   SilcBool call;
+  SilcSchedule schedule;
 } SilcUnixSignal;
 
 #define SIGNAL_COUNT 32
 SilcUnixSignal signal_call[SIGNAL_COUNT];
 
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+
+/* Linux's fast epoll system (level triggered) */
+
+int silc_epoll(SilcSchedule schedule, void *context)
+{
+  SilcUnixScheduler internal = context;
+  SilcTaskFd task;
+  struct epoll_event *fds = internal->fds;
+  SilcUInt32 fds_count = internal->fds_count;
+  int ret, i, timeout = -1;
+
+  /* Allocate larger fd table if needed */
+  i = silc_hash_table_count(schedule->fd_queue);
+  if (i > fds_count) {
+    fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
+                      (fds_count + (i / 2)));
+    if (silc_likely(fds)) {
+      internal->fds = fds;
+      internal->fds_count = fds_count = fds_count + (i / 2);
+    }
+  }
+
+  if (schedule->has_timeout)
+    timeout = ((schedule->timeout.tv_sec * 1000) +
+              (schedule->timeout.tv_usec / 1000));
+
+  SILC_SCHEDULE_UNLOCK(schedule);
+  ret = epoll_wait(internal->epfd, fds, fds_count, timeout);
+  SILC_SCHEDULE_LOCK(schedule);
+  if (ret <= 0)
+    return ret;
+
+  silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
+
+  for (i = 0; i < ret; i++) {
+    task = fds[i].data.ptr;
+    task->revents = 0;
+    if (!task->header.valid || !task->events) {
+      epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &fds[i]);
+      continue;
+    }
+    if (fds[i].events & (EPOLLIN | EPOLLPRI | EPOLLHUP | EPOLLERR))
+      task->revents |= SILC_TASK_READ;
+    if (fds[i].events & EPOLLOUT)
+      task->revents |= SILC_TASK_WRITE;
+    silc_list_add(schedule->fd_dispatch, task);
+  }
+
+  return ret;
+}
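
Note (not part of the patch): epoll_wait() takes a millisecond timeout, and the conversion above truncates tv_usec, so a 1-999 microsecond timeout becomes 0 and the call returns immediately. A hypothetical round-up helper, shown only to illustrate the trade-off:

#include <sys/time.h>

/* Round up so sub-millisecond timeouts do not truncate to "poll and
   return at once".  Not in the patch; the truncation may be intended. */
static int timeval_to_ms(const struct timeval *tv)
{
  return (int)(tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000);
}
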
+
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
 
 /* Calls normal poll() system call. */
 
@@ -62,11 +121,13 @@ int silc_poll(SilcSchedule schedule, void *context)
   struct pollfd *fds = internal->fds;
   SilcUInt32 fds_count = internal->fds_count;
   int fd, ret, i = 0, timeout = -1;
+  void *fdp;
 
   silc_hash_table_list(schedule->fd_queue, &htl);
-  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
+  while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
     if (!task->events)
       continue;
+    fd = SILC_PTR_TO_32(fdp);
 
     /* Allocate larger fd table if needed */
     if (i >= fds_count) {
@@ -74,7 +135,7 @@ int silc_poll(SilcSchedule schedule, void *context)
 
       fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
                         (fds_count + (fds_count / 2)));
-      if (!fds)
+      if (silc_unlikely(!fds))
        break;
       internal->fds = fds;
       internal->fds_count = fds_count = fds_count + (fds_count / 2);
@@ -96,6 +157,7 @@ int silc_poll(SilcSchedule schedule, void *context)
     i++;
   }
   silc_hash_table_list_reset(&htl);
+  silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
 
   if (schedule->has_timeout)
     timeout = ((schedule->timeout.tv_sec * 1000) +
@@ -112,7 +174,9 @@ int silc_poll(SilcSchedule schedule, void *context)
     if (!fds[i].revents)
       continue;
     if (!silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fds[i].fd),
-                             NULL, (void **)&task))
+                             NULL, (void *)&task))
+      continue;
+    if (!task->header.valid || !task->events)
       continue;
 
     fd = fds[i].revents;
@@ -120,6 +184,7 @@ int silc_poll(SilcSchedule schedule, void *context)
       task->revents |= SILC_TASK_READ;
     if (fd & POLLOUT)
       task->revents |= SILC_TASK_WRITE;
+    silc_list_add(schedule->fd_dispatch, task);
   }
 
   return ret;
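
Note (not part of the patch): the silc_list_init()/silc_list_add() pairs added to all three backends build schedule->fd_dispatch, separating "which fds are ready" from "run the callbacks". The gain is that a callback which deletes another task cannot leave a stale pointer in the current round, because dispatch re-checks validity. A self-contained model of the pattern with illustrative types (not SILC's):

#include <stdbool.h>
#include <stdio.h>

struct task {
  int fd;
  bool valid;          /* cleared when the task is deleted mid-round */
  unsigned revents;    /* events the poller reported */
  struct task *next;   /* dispatch-list link */
};

/* Phase 2: walk the list built in phase 1, skipping invalidated tasks. */
static void dispatch(struct task *head)
{
  struct task *t;
  for (t = head; t; t = t->next) {
    if (!t->valid)
      continue;
    printf("fd %d ready (events 0x%x)\n", t->fd, t->revents);
  }
}

int main(void)
{
  struct task b = { 4, false /* deleted mid-round */, 0x1, NULL };
  struct task a = { 3, true, 0x1, &b };
  dispatch(&a);        /* prints only fd 3 */
  return 0;
}
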
@@ -135,14 +200,16 @@ int silc_select(SilcSchedule schedule, void *context)
   SilcTaskFd task;
   fd_set in, out;
   int fd, max_fd = 0, ret;
+  void *fdp;
 
   FD_ZERO(&in);
   FD_ZERO(&out);
 
   silc_hash_table_list(schedule->fd_queue, &htl);
-  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
+  while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
     if (!task->events)
       continue;
+    fd = SILC_PTR_TO_32(fdp);
 
 #ifdef FD_SETSIZE
     if (fd >= FD_SETSIZE)
@@ -160,6 +227,7 @@ int silc_select(SilcSchedule schedule, void *context)
     task->revents = 0;
   }
   silc_hash_table_list_reset(&htl);
+  silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
 
   SILC_SCHEDULE_UNLOCK(schedule);
   ret = select(max_fd + 1, &in, &out, NULL, (schedule->has_timeout ?
@@ -169,9 +237,10 @@ int silc_select(SilcSchedule schedule, void *context)
     return ret;
 
   silc_hash_table_list(schedule->fd_queue, &htl);
-  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
-    if (!task->events)
+  while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
+    if (!task->header.valid || !task->events)
       continue;
+    fd = SILC_PTR_TO_32(fdp);
 
 #ifdef FD_SETSIZE
     if (fd >= FD_SETSIZE)
@@ -182,6 +251,7 @@ int silc_select(SilcSchedule schedule, void *context)
       task->revents |= SILC_TASK_READ;
     if (FD_ISSET(fd, &out))
       task->revents |= SILC_TASK_WRITE;
+    silc_list_add(schedule->fd_dispatch, task);
   }
   silc_hash_table_list_reset(&htl);
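
Note (not part of the patch): the FD_SETSIZE guard in silc_select() is load-bearing; calling FD_SET() with fd >= FD_SETSIZE is undefined behavior (it writes past the fd_set), and that hard ceiling is one motivation for the epoll backend. A standalone illustration of the bound check:

#include <stdio.h>
#include <sys/select.h>

static int add_fd(fd_set *set, int fd)
{
  if (fd < 0 || fd >= FD_SETSIZE)  /* typically 1024 on Linux */
    return -1;                     /* caller must skip or fail, as above */
  FD_SET(fd, set);
  return 0;
}

int main(void)
{
  fd_set in;
  FD_ZERO(&in);
  printf("fd 0 -> %d, fd %d -> %d\n",
         add_fd(&in, 0), FD_SETSIZE, add_fd(&in, FD_SETSIZE));
  return 0;
}
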
 
@@ -190,6 +260,58 @@ int silc_select(SilcSchedule schedule, void *context)
 
 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
 
+/* Schedule `task' with events `event_mask'. Zero `event_mask' unschedules. */
+
+SilcBool silc_schedule_internal_schedule_fd(SilcSchedule schedule,
+                                           void *context,
+                                           SilcTaskFd task,
+                                           SilcTaskEvent event_mask)
+{
+#if defined(HAVE_EPOLL_WAIT)
+  SilcUnixScheduler internal = (SilcUnixScheduler)context;
+  struct epoll_event event;
+
+  if (!internal)
+    return TRUE;
+
+  SILC_LOG_DEBUG(("Scheduling fd %lu, mask %x", task->fd, event_mask));
+
+  memset(&event, 0, sizeof(event));
+  if (event_mask & SILC_TASK_READ)
+    event.events |= (EPOLLIN | EPOLLPRI);
+  if (event_mask & SILC_TASK_WRITE)
+    event.events |= EPOLLOUT;
+
+  /* Zero mask unschedules task */
+  if (silc_unlikely(!event.events)) {
+    if (epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &event)) {
+      SILC_LOG_DEBUG(("epoll_ctl (DEL): %s", strerror(errno)));
+      return FALSE;
+    }
+    return TRUE;
+  }
+
+  /* Schedule the task */
+  if (silc_unlikely(!task->scheduled)) {
+    event.data.ptr = task;
+    if (epoll_ctl(internal->epfd, EPOLL_CTL_ADD, task->fd, &event)) {
+      SILC_LOG_DEBUG(("epoll_ctl (ADD): %s", strerror(errno)));
+      return FALSE;
+    }
+    task->scheduled = TRUE;
+    return TRUE;
+  }
+
+  /* Schedule for specific mask */
+  event.data.ptr = task;
+  if (epoll_ctl(internal->epfd, EPOLL_CTL_MOD, task->fd, &event)) {
+    SILC_LOG_DEBUG(("epoll_ctl (MOD): %s", strerror(errno)));
+    return FALSE;
+  }
+#endif /* HAVE_EPOLL_WAIT */
+  return TRUE;
+}
+
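
Note (not part of the patch): silc_schedule_internal_schedule_fd() maps the scheduler's event mask onto the three epoll_ctl(2) operations: first schedule is EPOLL_CTL_ADD, a mask change on an already scheduled task is EPOLL_CTL_MOD, and a zero mask is EPOLL_CTL_DEL. A standalone sketch of that lifecycle using a pipe:

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
  struct epoll_event ev = { 0 };
  int p[2], epfd = epoll_create(4);
  if (epfd < 0 || pipe(p))
    return 1;

  ev.events = EPOLLIN;                         /* SILC_TASK_READ */
  ev.data.ptr = NULL;                          /* would be the task pointer */
  epoll_ctl(epfd, EPOLL_CTL_ADD, p[0], &ev);   /* first schedule: ADD */

  ev.events = EPOLLIN | EPOLLOUT;              /* mask change: MOD */
  epoll_ctl(epfd, EPOLL_CTL_MOD, p[0], &ev);

  epoll_ctl(epfd, EPOLL_CTL_DEL, p[0], &ev);   /* zero mask: DEL */
  puts("ADD/MOD/DEL lifecycle done");
  close(p[0]); close(p[1]); close(epfd);
  return 0;
}
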
 #ifdef SILC_THREADS
 
 SILC_TASK_CALLBACK(silc_schedule_wakeup_cb)
@@ -199,26 +321,56 @@ SILC_TASK_CALLBACK(silc_schedule_wakeup_cb)
 
   SILC_LOG_DEBUG(("Wokeup"));
 
-  read(internal->wakeup_pipe[0], &c, 1);
+  (void)read(internal->wakeup_pipe[0], &c, 1);
 }
 
+SILC_TASK_CALLBACK(silc_schedule_wakeup_init)
+{
+  SilcUnixScheduler internal = schedule->internal;
+
+  internal->wakeup_task =
+    silc_schedule_task_add(schedule, internal->wakeup_pipe[0],
+                          silc_schedule_wakeup_cb, internal,
+                          0, 0, SILC_TASK_FD);
+  if (!internal->wakeup_task) {
+    SILC_LOG_WARNING(("Could not add a wakeup task, threads won't work"));
+    close(internal->wakeup_pipe[0]);
+    return;
+  }
+  silc_schedule_internal_schedule_fd(schedule, internal,
+                                    (SilcTaskFd)internal->wakeup_task,
+                                    SILC_TASK_READ);
+}
 #endif /* SILC_THREADS */
 
 /* Initializes the platform specific scheduler.  This for example initializes
    the wakeup mechanism of the scheduler.  In multi-threaded environment
-   the scheduler needs to be wakenup when tasks are added or removed from
+   the scheduler needs to be woken up when tasks are added or removed from
    the task queues.  Returns context to the platform specific scheduler. */
 
 void *silc_schedule_internal_init(SilcSchedule schedule,
                                  void *app_context)
 {
   SilcUnixScheduler internal;
+  int i;
 
-  internal = silc_calloc(1, sizeof(*internal));
+  internal = silc_scalloc(schedule->stack, 1, sizeof(*internal));
   if (!internal)
     return NULL;
 
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+  internal->epfd = epoll_create(4);
+  if (internal->epfd < 0) {
+    SILC_LOG_ERROR(("epoll_create() failed: %s", strerror(errno)));
+    return NULL;
+  }
+  internal->fds = silc_calloc(4, sizeof(*internal->fds));
+  if (!internal->fds) {
+    close(internal->epfd);
+    return NULL;
+  }
+  internal->fds_count = 4;
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
   getrlimit(RLIMIT_NOFILE, &internal->nofile);
 
   if (schedule->max_tasks > 0) {
@@ -242,26 +394,20 @@ void *silc_schedule_internal_init(SilcSchedule schedule,
 #ifdef SILC_THREADS
   if (pipe(internal->wakeup_pipe)) {
     SILC_LOG_ERROR(("pipe() fails: %s", strerror(errno)));
-    silc_free(internal);
     return NULL;
   }
 
-  internal->wakeup_task =
-    silc_schedule_task_add(schedule, internal->wakeup_pipe[0],
-                          silc_schedule_wakeup_cb, internal,
-                          0, 0, SILC_TASK_FD);
-  if (!internal->wakeup_task) {
-    SILC_LOG_ERROR(("Could not add a wakeup task, threads won't work"));
-    close(internal->wakeup_pipe[0]);
-    close(internal->wakeup_pipe[1]);
-    silc_free(internal);
-    return NULL;
-  }
-#endif
+  silc_schedule_task_add_timeout(schedule, silc_schedule_wakeup_init,
+                                internal, 0, 0);
+#endif /* SILC_THREADS */
 
   internal->app_context = app_context;
 
-  memset(signal_call, 0, sizeof(signal_call) / sizeof(signal_call[0]));
+  for (i = 0; i < SIGNAL_COUNT; i++) {
+    signal_call[i].sig = 0;
+    signal_call[i].call = FALSE;
+    signal_call[i].schedule = schedule;
+  }
 
   return (void *)internal;
 }
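
Note (not part of the patch): wakeup-task registration now happens inside a zero-timeout task (silc_schedule_wakeup_init) so the pipe fd is added only after the scheduler is fully constructed. The mechanism itself is the classic self-pipe trick; a standalone sketch:

#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

static int wakeup_pipe[2];

/* Called from another thread: write(2) is async-signal-safe and wakes a
   scheduler blocked on the pipe's read end. */
static void wakeup(void)
{
  (void)write(wakeup_pipe[1], "!", 1);
}

int main(void)
{
  char c;
  fd_set in;
  if (pipe(wakeup_pipe))
    return 1;
  wakeup();                               /* simulate a cross-thread wakeup */
  FD_ZERO(&in);
  FD_SET(wakeup_pipe[0], &in);
  select(wakeup_pipe[0] + 1, &in, NULL, NULL, NULL);  /* returns at once */
  (void)read(wakeup_pipe[0], &c, 1);      /* drain, as silc_schedule_wakeup_cb does */
  puts("woke up");
  return 0;
}
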
@@ -285,11 +431,12 @@ void silc_schedule_internal_uninit(SilcSchedule schedule, void *context)
   close(internal->wakeup_pipe[1]);
 #endif
 
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+  close(internal->epfd);
+  silc_free(internal->fds);
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
   silc_free(internal->fds);
 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
-
-  silc_free(internal);
 }
 
 /* Wakes up the scheduler */
@@ -299,12 +446,12 @@ void silc_schedule_internal_wakeup(SilcSchedule schedule, void *context)
 #ifdef SILC_THREADS
   SilcUnixScheduler internal = (SilcUnixScheduler)context;
 
-  if (!internal)
+  if (!internal || !internal->wakeup_task)
     return;
 
   SILC_LOG_DEBUG(("Wakeup"));
 
-  write(internal->wakeup_pipe[1], "!", 1);
+  (void)write(internal->wakeup_pipe[1], "!", 1);
 #endif
 }
 
@@ -314,11 +461,14 @@ static void silc_schedule_internal_sighandler(int signal)
 {
   int i;
 
+  SILC_LOG_DEBUG(("Start"));
+
   for (i = 0; i < SIGNAL_COUNT; i++) {
-    if (signal_call[i].signal == signal) {
+    if (signal_call[i].sig == signal) {
       signal_call[i].call = TRUE;
+      signal_call[i].schedule->signal_tasks = TRUE;
       SILC_LOG_DEBUG(("Scheduling signal %d to be called",
-                     signal_call[i].signal));
+                     signal_call[i].sig));
       break;
     }
   }
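
Note (not part of the patch): the handler stays async-signal-safe by only raising flags (signal_call[i].call and the new schedule->signal_tasks) and deferring the callback to silc_schedule_internal_signals_call(). A standalone sketch of the same defer-to-main-loop pattern; strictly, such flags should be volatile sig_atomic_t:

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_signal;  /* the only thing the handler writes */

static void handler(int sig)
{
  (void)sig;
  got_signal = 1;        /* no locks, no malloc, no stdio inside a handler */
}

int main(void)
{
  signal(SIGINT, handler);
  raise(SIGINT);         /* simulate delivery */
  if (got_signal)        /* the main loop notices and does the real work */
    puts("signal dispatched from the main loop");
  return 0;
}
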
@@ -336,15 +486,16 @@ void silc_schedule_internal_signal_register(SilcSchedule schedule,
   if (!internal)
     return;
 
-  SILC_LOG_DEBUG(("Registering signal %d", signal));
+  SILC_LOG_DEBUG(("Registering signal %d", sig));
 
   silc_schedule_internal_signals_block(schedule, context);
 
   for (i = 0; i < SIGNAL_COUNT; i++) {
-    if (!signal_call[i].signal) {
-      signal_call[i].signal = sig;
+    if (!signal_call[i].sig) {
+      signal_call[i].sig = sig;
       signal_call[i].callback = callback;
       signal_call[i].context = callback_context;
+      signal_call[i].schedule = schedule;
       signal_call[i].call = FALSE;
       signal(sig, silc_schedule_internal_sighandler);
       break;
@@ -365,15 +516,16 @@ void silc_schedule_internal_signal_unregister(SilcSchedule schedule,
   if (!internal)
     return;
 
-  SILC_LOG_DEBUG(("Unregistering signal %d", signal));
+  SILC_LOG_DEBUG(("Unregistering signal %d", sig));
 
   silc_schedule_internal_signals_block(schedule, context);
 
   for (i = 0; i < SIGNAL_COUNT; i++) {
-    if (signal_call[i].signal == sig) {
-      signal_call[i].signal = 0;
+    if (signal_call[i].sig == sig) {
+      signal_call[i].sig = 0;
       signal_call[i].callback = NULL;
       signal_call[i].context = NULL;
+      signal_call[i].schedule = NULL;
       signal_call[i].call = FALSE;
       signal(sig, SIG_DFL);
     }
@@ -401,12 +553,14 @@ void silc_schedule_internal_signals_call(SilcSchedule schedule, void *context)
     if (signal_call[i].call &&
         signal_call[i].callback) {
       SILC_LOG_DEBUG(("Calling signal %d callback",
-                     signal_call[i].signal));
+                     signal_call[i].sig));
+      silc_schedule_internal_signals_unblock(schedule, context);
       signal_call[i].callback(schedule, internal->app_context,
                              SILC_TASK_INTERRUPT,
-                             signal_call[i].signal,
+                             signal_call[i].sig,
                              signal_call[i].context);
       signal_call[i].call = FALSE;
+      silc_schedule_internal_signals_block(schedule, context);
     }
   }
 
@@ -442,11 +596,14 @@ const SilcScheduleOps schedule_ops =
 {
   silc_schedule_internal_init,
   silc_schedule_internal_uninit,
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+  silc_epoll,
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
   silc_poll,
 #else
   silc_select,
 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
+  silc_schedule_internal_schedule_fd,
   silc_schedule_internal_wakeup,
   silc_schedule_internal_signal_register,
   silc_schedule_internal_signal_unregister,
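
Note (not part of the patch): schedule_ops is the platform vtable the generic scheduler calls through; the patch inserts the new schedule_fd slot so each backend owns fd registration. A reduced standalone model of that dispatch shape (layout illustrative, not SILC's actual SilcScheduleOps):

#include <stdio.h>

typedef struct {
  const char *name;
  int (*wait)(void *internal, int timeout_ms);   /* epoll/poll/select slot */
} sched_ops;

static int epoll_backend(void *internal, int timeout_ms)
{
  (void)internal;
  printf("epoll backend, timeout %d ms\n", timeout_ms);
  return 0;
}

/* Chosen at compile time, exactly as the #if ladder above selects
   silc_epoll, silc_poll or silc_select. */
static const sched_ops ops = { "epoll", epoll_backend };

int main(void)
{
  return ops.wait(NULL, 250);  /* generic code never names the backend */
}
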