/* Forward declarations */
typedef struct SilcTaskQueueStruct *SilcTaskQueue;
-/* System specific routines. Implemented under unix/ and win32/. */
+/* System specific routines. Implemented under unix/, win32/ and such. */
/* System specific select(). Returns same values as normal select(). */
-int silc_select(SilcScheduleFd fds, uint32 fds_count, struct timeval *timeout);
+int silc_select(SilcScheduleFd fds, SilcUInt32 fds_count,
+ struct timeval *timeout);
-/* Initializes the wakeup of the scheduler. In multi-threaded environment
+/* Initializes the platform specific scheduler. This for example initializes
+ the wakeup mechanism of the scheduler. In multi-threaded environment
   the scheduler needs to be woken up when tasks are added or removed from
- the task queues. This will initialize the wakeup for the scheduler.
- Any tasks that needs to be registered must be registered to the `queue'.
- It is guaranteed that the scheduler will automatically free any
- registered tasks in this queue. This is system specific routine. */
-void *silc_schedule_wakeup_init(SilcSchedule schedule);
+ the task queues. Returns context to the platform specific scheduler. */
+void *silc_schedule_internal_init(SilcSchedule schedule);
-/* Uninitializes the system specific wakeup. */
-void silc_schedule_wakeup_uninit(void *context);
+/* Uninitializes the platform specific scheduler context. */
+void silc_schedule_internal_uninit(void *context);
/* Wakes up the scheduler. This is a platform specific routine. */
-void silc_schedule_wakeup_internal(void *context);
+void silc_schedule_internal_wakeup(void *context);
+/* Register signal */
+void silc_schedule_internal_signal_register(void *context,
+ SilcUInt32 signal,
+ SilcTaskCallback callback,
+ void *callback_context);
+
+/* Unregister signal */
+void silc_schedule_internal_signal_unregister(void *context,
+ SilcUInt32 signal,
+ SilcTaskCallback callback,
+ void *callback_context);
+
+/* Mark signal to be called later. */
+void silc_schedule_internal_signal_call(void *context, SilcUInt32 signal);
+
+/* Call all signals */
+void silc_schedule_internal_signals_call(void *context,
+ SilcSchedule schedule);
+
+/* Block registered signals in scheduler. */
+void silc_schedule_internal_signals_block(void *context);
+
+/* Unblock registered signals in scheduler. */
+void silc_schedule_internal_signals_unblock(void *context);
/* Internal task management routines. */
static void silc_task_queue_alloc(SilcTaskQueue *queue);
static void silc_task_queue_free(SilcTaskQueue queue);
-static SilcTask silc_task_find(SilcTaskQueue queue, uint32 fd);
+static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd);
static SilcTask silc_task_add(SilcTaskQueue queue, SilcTask newtask,
SilcTaskPriority priority);
static SilcTask silc_task_get_first(SilcTaskQueue queue, SilcTask first);
static void silc_task_del_by_context(SilcTaskQueue queue, void *context);
static void silc_task_del_by_callback(SilcTaskQueue queue,
SilcTaskCallback callback);
-static void silc_task_del_by_fd(SilcTaskQueue queue, uint32 fd);
+static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd);
/* Returns the task queue by task type */
/* Returns the task queue by task type.  Note: the expansion refers to a
   variable named `schedule' that must be in scope at the point of use
   (every caller in this file takes a SilcSchedule named `schedule').
   The `type' argument and the whole expansion are parenthesized so the
   macro stays correct for any caller-side expression. */
#define SILC_SCHEDULE_GET_QUEUE(type)				\
  ((type) == SILC_TASK_FD ? schedule->fd_queue :		\
   (type) == SILC_TASK_TIMEOUT ? schedule->timeout_queue :	\
   schedule->generic_queue)
/* Locks.  These also block the signals we care about and thus guarantee
   that while we are in the scheduler no signals can occur.  This way we
   can synchronise signals with the SILC Scheduler.  The order matters:
   signals are blocked before taking the mutex, and unblocked only after
   releasing it.  The `schedule' parameter is parenthesized so any
   caller-side expression expands correctly. */
#define SILC_SCHEDULE_LOCK(schedule)				\
do {								\
  silc_schedule_internal_signals_block((schedule)->internal);	\
  silc_mutex_lock((schedule)->lock);				\
} while (0)

#define SILC_SCHEDULE_UNLOCK(schedule)				\
do {								\
  silc_mutex_unlock((schedule)->lock);				\
  silc_schedule_internal_signals_unblock((schedule)->internal);	\
} while (0)
+
/* SILC Task object. Represents one task in the scheduler. */
struct SilcTaskStruct {
- uint32 fd;
- struct timeval timeout;
- SilcTaskCallback callback;
- void *context;
- bool valid;
- SilcTaskPriority priority;
- SilcTaskType type;
+ SilcUInt32 fd;
+ SilcTaskCallback callback; /* Task callback */
+ void *context; /* Task callback context */
+ struct timeval timeout; /* Set for timeout tasks */
+ unsigned int valid : 1; /* Set when task is valid */
+ unsigned int priority : 2; /* Priority of the task */
+ unsigned int type : 5; /* Type of the task */
/* Pointers forming doubly linked circular list */
struct SilcTaskStruct *next;
   List of file descriptors the scheduler is supposed to be listening.
This is updated internally.
- uint32 max_fd
- uint32 last_fd
+ SilcUInt32 max_fd
+ SilcUInt32 last_fd
Size of the fd_list list. There can be `max_fd' many tasks in
the scheduler at once. The `last_fd' is the last valid entry
File descriptor sets for select(). These are automatically managed
by the scheduler and should not be touched otherwise.
- void *wakeup
+ void *internal
- System specific wakeup context. On multi-threaded environments the
- scheduler needs to be wakenup (in the thread) when tasks are added
- or removed. This is initialized by silc_schedule_wakeup_init.
+ System specific scheduler context.
SILC_MUTEX_DEFINE(lock)
Scheduler lock.
+ bool signal_tasks
+
+  TRUE when tasks have been registered from signals. Next round in
+  scheduler will call the callbacks when this is TRUE.
+
*/
struct SilcScheduleStruct {
SilcTaskQueue fd_queue;
SilcTaskQueue timeout_queue;
SilcTaskQueue generic_queue;
SilcScheduleFd fd_list;
- uint32 max_fd;
- uint32 last_fd;
+ SilcUInt32 max_fd;
+ SilcUInt32 last_fd;
struct timeval *timeout;
bool valid;
- void *wakeup;
+ void *internal;
SILC_MUTEX_DEFINE(lock);
bool is_locked;
+ bool signal_tasks;
};
/* Initializes the scheduler. This returns the scheduler context that
silc_task_queue_alloc(&schedule->timeout_queue);
silc_task_queue_alloc(&schedule->generic_queue);
+ if (!max_tasks)
+ max_tasks = 200;
+
/* Initialize the scheduler */
schedule->fd_list = silc_calloc(max_tasks, sizeof(*schedule->fd_list));
schedule->max_fd = max_tasks;
/* Allocate scheduler lock */
silc_mutex_alloc(&schedule->lock);
- /* Initialize the wakeup, for multi-threads support */
- schedule->wakeup = silc_schedule_wakeup_init(schedule);
+ /* Initialize the platform specific scheduler. */
+ schedule->internal = silc_schedule_internal_init(schedule);
return schedule;
}
silc_free(schedule->fd_list);
- /* Uninit the wakeup */
- silc_schedule_wakeup_uninit(schedule->wakeup);
+ /* Uninit the platform specific scheduler. */
+ silc_schedule_internal_uninit(schedule->internal);
silc_mutex_free(schedule->lock);
return TRUE;
}
+/* Enlarge the capabilities of the scheduler to handle tasks to `max_tasks'. */
+
+bool silc_schedule_reinit(SilcSchedule schedule, int max_tasks)
+{
+ SILC_SCHEDULE_LOCK(schedule);
+ if (schedule->max_fd <= max_tasks)
+ return FALSE;
+ schedule->fd_list = silc_realloc(schedule->fd_list,
+ (sizeof(*schedule->fd_list) * max_tasks));
+ schedule->max_fd = max_tasks;
+ SILC_SCHEDULE_UNLOCK(schedule);
+ return TRUE;
+}
+
/* Stops the schedule even if it is not supposed to be stopped yet.
   After calling this, one should call silc_schedule_uninit (after the
   silc_schedule has returned). */

void silc_schedule_stop(SilcSchedule schedule)
{
  SILC_LOG_DEBUG(("Stopping scheduler"));

  /* Take the scheduler lock (which also blocks the registered signals)
     so the flag flip is synchronised with the scheduler loop. */
  SILC_SCHEDULE_LOCK(schedule);
  schedule->valid = FALSE;
  SILC_SCHEDULE_UNLOCK(schedule);
}
/* Executes nontimeout tasks. It then checks whether any of the fd tasks
static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
{
SilcTask task;
- int i;
+ int i, last_fd = schedule->last_fd;
+ SilcUInt32 fd;
- for (i = 0; i <= schedule->last_fd; i++) {
+ for (i = 0; i <= last_fd; i++) {
if (schedule->fd_list[i].events == 0)
continue;
+ fd = schedule->fd_list[i].fd;
+
/* First check whether this fd has task in the fd queue */
silc_mutex_lock(schedule->fd_queue->lock);
- task = silc_task_find(schedule->fd_queue, schedule->fd_list[i].fd);
- silc_mutex_unlock(schedule->fd_queue->lock);
+ task = silc_task_find(schedule->fd_queue, fd);
/* If the task was found then execute its callbacks. If not then
execute all generic tasks for that fd. */
/* Validity of the task is checked always before and after
       execution because the task might have been unregistered
in the callback function, ie. it is not valid anymore. */
- silc_mutex_lock(schedule->fd_queue->lock);
/* Is the task ready for reading */
if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
silc_mutex_unlock(schedule->fd_queue->lock);
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
task->callback(schedule, SILC_TASK_READ, task->fd, task->context);
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
silc_mutex_lock(schedule->fd_queue->lock);
}
/* Is the task ready for writing */
if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
silc_mutex_unlock(schedule->fd_queue->lock);
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
task->callback(schedule, SILC_TASK_WRITE, task->fd, task->context);
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
silc_mutex_lock(schedule->fd_queue->lock);
}
} else {
/* Run generic tasks for this fd. */
+ silc_mutex_unlock(schedule->fd_queue->lock);
+
silc_mutex_lock(schedule->generic_queue->lock);
if (!schedule->generic_queue->task) {
silc_mutex_unlock(schedule->generic_queue->lock);
/* Is the task ready for reading */
if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
silc_mutex_unlock(schedule->generic_queue->lock);
- silc_mutex_unlock(schedule->lock);
- task->callback(schedule, SILC_TASK_READ, schedule->fd_list[i].fd,
- task->context);
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
+ task->callback(schedule, SILC_TASK_READ, fd, task->context);
+ SILC_SCHEDULE_LOCK(schedule);
silc_mutex_lock(schedule->generic_queue->lock);
}
/* Is the task ready for writing */
if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
silc_mutex_unlock(schedule->generic_queue->lock);
- silc_mutex_unlock(schedule->lock);
- task->callback(schedule, SILC_TASK_WRITE, schedule->fd_list[i].fd,
- task->context);
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
+ task->callback(schedule, SILC_TASK_WRITE, fd, task->context);
+ SILC_SCHEDULE_LOCK(schedule);
silc_mutex_lock(schedule->generic_queue->lock);
}
if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
if (task->valid) {
silc_mutex_unlock(queue->lock);
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
task->callback(schedule, SILC_TASK_EXPIRE, task->fd, task->context);
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
silc_mutex_lock(queue->lock);
}
SILC_LOG_DEBUG(("In scheduler loop"));
if (!schedule->is_locked)
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
+
+ /* Deliver signals if any has been set to be called */
+ if (schedule->signal_tasks) {
+ SILC_SCHEDULE_UNLOCK(schedule);
+ silc_schedule_internal_signals_call(schedule->internal, schedule);
+ schedule->signal_tasks = FALSE;
+ SILC_SCHEDULE_LOCK(schedule);
+ }
/* If the task queues aren't initialized or we aren't valid anymore
we will return */
&& !schedule->generic_queue) || schedule->valid == FALSE) {
SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
if (!schedule->is_locked)
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
return FALSE;
}
schedule->timeout = &timeout;
}
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
/* This is the main select(). The program blocks here until some
of the selected file descriptors change status or the selected
ret = silc_select(schedule->fd_list, schedule->last_fd + 1,
schedule->timeout);
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
switch (ret) {
case -1:
}
if (!schedule->is_locked)
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
return TRUE;
}
return;
}
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
schedule->is_locked = TRUE;
/* Start the scheduler loop */
while (silc_schedule_one(schedule, -1))
;
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
}
/* Wakes up the scheduler. This is used only in multi-threaded
{
#ifdef SILC_THREADS
SILC_LOG_DEBUG(("Wakeup scheduler"));
- silc_mutex_lock(schedule->lock);
- silc_schedule_wakeup_internal(schedule->wakeup);
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
+ silc_schedule_internal_wakeup(schedule->internal);
+ SILC_SCHEDULE_UNLOCK(schedule);
#endif
}
/* Add new task to the scheduler */
-SilcTask silc_schedule_task_add(SilcSchedule schedule, uint32 fd,
+SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
SilcTaskCallback callback, void *context,
long seconds, long useconds,
SilcTaskType type,
/* Remove task by fd */
-void silc_schedule_task_del_by_fd(SilcSchedule schedule, uint32 fd)
+void silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
{
+ SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
+
silc_task_del_by_fd(schedule->timeout_queue, fd);
silc_task_del_by_fd(schedule->fd_queue, fd);
- silc_task_del_by_fd(schedule->generic_queue, fd);
}
/* Remove task by task callback. */
void silc_schedule_task_del_by_callback(SilcSchedule schedule,
SilcTaskCallback callback)
{
+ SILC_LOG_DEBUG(("Unregister task by callback"));
+
silc_task_del_by_callback(schedule->timeout_queue, callback);
silc_task_del_by_callback(schedule->fd_queue, callback);
silc_task_del_by_callback(schedule->generic_queue, callback);
void silc_schedule_task_del_by_context(SilcSchedule schedule, void *context)
{
+ SILC_LOG_DEBUG(("Unregister task by context"));
+
silc_task_del_by_context(schedule->timeout_queue, context);
silc_task_del_by_context(schedule->fd_queue, context);
silc_task_del_by_context(schedule->generic_queue, context);
one file descriptor to set different iomasks. */
void silc_schedule_set_listen_fd(SilcSchedule schedule,
- uint32 fd, SilcTaskEvent iomask)
+ SilcUInt32 fd, SilcTaskEvent iomask)
{
int i;
bool found = FALSE;
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
for (i = 0; i < schedule->max_fd; i++)
if (schedule->fd_list[i].fd == fd) {
break;
}
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
}
/* Removes a file descriptor from listen list. */
-void silc_schedule_unset_listen_fd(SilcSchedule schedule, uint32 fd)
+void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
{
int i;
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
+
+ SILC_LOG_DEBUG(("Unset listen fd %d", fd));
for (i = 0; i < schedule->max_fd; i++)
if (schedule->fd_list[i].fd == fd) {
break;
}
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
+}
+
+/* Register a new signal */
+
+void silc_schedule_signal_register(SilcSchedule schedule, SilcUInt32 signal,
+ SilcTaskCallback callback, void *context)
+{
+ silc_schedule_internal_signal_register(schedule->internal, signal,
+ callback, context);
+}
+
+/* Unregister a new signal */
+
+void silc_schedule_signal_unregister(SilcSchedule schedule, SilcUInt32 signal,
+ SilcTaskCallback callback, void *context)
+{
+ silc_schedule_internal_signal_unregister(schedule->internal, signal,
+ callback, context);
+}
+
+/* Call signal indicated by `signal'. */
+
+void silc_schedule_signal_call(SilcSchedule schedule, SilcUInt32 signal)
+{
+ /* Mark that signals needs to be delivered later. */
+ silc_schedule_internal_signal_call(schedule->internal, signal);
+ schedule->signal_tasks = TRUE;
}
/* Allocates a new task queue into the scheduler */
static void silc_task_queue_free(SilcTaskQueue queue)
{
  silc_mutex_free(queue->lock);
  /* Poison the queue memory before freeing so stale references to the
     queue are caught quickly instead of silently reading freed data. */
  memset(queue, 'F', sizeof(*queue));
  silc_free(queue);
}
/* Return task by its fd. */
-static SilcTask silc_task_find(SilcTaskQueue queue, uint32 fd)
+static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd)
{
SilcTask next;
{
SilcTask first, old, next;
- if (!queue)
+ if (!queue || !task)
return FALSE;
if (!queue->task) {
next = first;
while(1) {
- next = next->next;
- silc_free(next->prev);
- if (next == first)
+ old = next->next;
+ silc_free(next);
+ if (old == first)
break;
+ next = old;
}
queue->task = NULL;
return FALSE;
}
-static void silc_task_del_by_fd(SilcTaskQueue queue, uint32 fd)
+static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd)
{
SilcTask next;
- SILC_LOG_DEBUG(("Unregister task by fd"));
-
silc_mutex_lock(queue->lock);
if (!queue->task) {
{
SilcTask next;
- SILC_LOG_DEBUG(("Unregister task by callback"));
-
silc_mutex_lock(queue->lock);
if (!queue->task) {
{
SilcTask next;
- SILC_LOG_DEBUG(("Unregister task by context"));
-
silc_mutex_lock(queue->lock);
if (!queue->task) {