/* Forward declarations */
typedef struct SilcTaskQueueStruct *SilcTaskQueue;
-/* System specific routines. Implemented under unix/ and win32/. */
+/* System specific routines. Implemented under unix/, win32/ and such. */
/* System specific select(). Returns same values as normal select(). */
-int silc_select(SilcScheduleFd fds, uint32 fds_count, struct timeval *timeout);
+int silc_select(SilcScheduleFd fds, SilcUInt32 fds_count,
+ struct timeval *timeout);
-/* Initializes the wakeup of the scheduler. In multi-threaded environment
+/* Initializes the platform specific scheduler. For example, this initializes
+ the wakeup mechanism of the scheduler. In multi-threaded environment
the scheduler needs to be wakenup when tasks are added or removed from
- the task queues. This will initialize the wakeup for the scheduler.
- Any tasks that needs to be registered must be registered to the `queue'.
- It is guaranteed that the scheduler will automatically free any
- registered tasks in this queue. This is system specific routine. */
-void *silc_schedule_wakeup_init(SilcSchedule schedule);
+ the task queues. Returns context to the platform specific scheduler. */
+void *silc_schedule_internal_init(SilcSchedule schedule, void *context);
-/* Uninitializes the system specific wakeup. */
-void silc_schedule_wakeup_uninit(void *context);
+/* Uninitializes the platform specific scheduler context. */
+void silc_schedule_internal_uninit(void *context);
/* Wakes up the scheduler. This is platform specific routine */
-void silc_schedule_wakeup_internal(void *context);
+void silc_schedule_internal_wakeup(void *context);
+/* Register signal */
+void silc_schedule_internal_signal_register(void *context,
+ SilcUInt32 signal,
+ SilcTaskCallback callback,
+ void *callback_context);
+
+/* Unregister signal */
+void silc_schedule_internal_signal_unregister(void *context,
+ SilcUInt32 signal,
+ SilcTaskCallback callback,
+ void *callback_context);
+
+/* Mark signal to be called later. */
+void silc_schedule_internal_signal_call(void *context, SilcUInt32 signal);
+
+/* Call all signals */
+void silc_schedule_internal_signals_call(void *context,
+ SilcSchedule schedule);
+
+/* Block registered signals in scheduler. */
+void silc_schedule_internal_signals_block(void *context);
+
+/* Unblock registered signals in scheduler. */
+void silc_schedule_internal_signals_unblock(void *context);
/* Internal task management routines. */
+static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
+ bool dispatch_all);
static void silc_task_queue_alloc(SilcTaskQueue *queue);
static void silc_task_queue_free(SilcTaskQueue queue);
-static SilcTask silc_task_find(SilcTaskQueue queue, uint32 fd);
+static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd);
static SilcTask silc_task_add(SilcTaskQueue queue, SilcTask newtask,
SilcTaskPriority priority);
static SilcTask silc_task_get_first(SilcTaskQueue queue, SilcTask first);
static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
SilcTaskPriority priority);
static int silc_schedule_task_remove(SilcTaskQueue queue, SilcTask task);
-static int silc_schedule_task_timeout_compare(struct timeval *smaller,
- struct timeval *bigger);
static void silc_task_del_by_context(SilcTaskQueue queue, void *context);
static void silc_task_del_by_callback(SilcTaskQueue queue,
SilcTaskCallback callback);
-static void silc_task_del_by_fd(SilcTaskQueue queue, uint32 fd);
+static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd);
/* Returns the task queue by task type */
-#define SILC_SCHEDULE_GET_QUEUE(type) \
- (type == SILC_TASK_FD ? schedule->fd_queue : \
- type == SILC_TASK_TIMEOUT ? schedule->timeout_queue : \
+#define SILC_SCHEDULE_GET_QUEUE(type) \
+ (type == SILC_TASK_FD ? schedule->fd_queue : \
+ type == SILC_TASK_TIMEOUT ? schedule->timeout_queue : \
schedule->generic_queue)
+/* Locks. These also block signals that we care about and thus guarantee
+ that while we are in scheduler no signals can happen. This way we can
+ synchronise signals with SILC Scheduler. */
+#define SILC_SCHEDULE_LOCK(schedule) \
+do { \
+ silc_schedule_internal_signals_block(schedule->internal); \
+ silc_mutex_lock(schedule->lock); \
+} while (0)
+#define SILC_SCHEDULE_UNLOCK(schedule) \
+do { \
+ silc_mutex_unlock(schedule->lock); \
+ silc_schedule_internal_signals_unblock(schedule->internal); \
+} while (0)
+
/* SILC Task object. Represents one task in the scheduler. */
struct SilcTaskStruct {
- uint32 fd;
- struct timeval timeout;
- SilcTaskCallback callback;
- void *context;
- bool valid;
- SilcTaskPriority priority;
- SilcTaskType type;
+ SilcUInt32 fd;
+ SilcTaskCallback callback; /* Task callback */
+ void *context; /* Task callback context */
+ struct timeval timeout; /* Set for timeout tasks */
+ unsigned int valid : 1; /* Set when task is valid */
+ unsigned int priority : 2; /* Priority of the task */
+ unsigned int type : 5; /* Type of the task */
/* Pointers forming doubly linked circular list */
struct SilcTaskStruct *next;
List of file descriptors the scheduler is supposed to be listenning.
This is updated internally.
- uint32 max_fd
- uint32 last_fd
+ SilcUInt32 max_fd
+ SilcUInt32 last_fd
Size of the fd_list list. There can be `max_fd' many tasks in
the scheduler at once. The `last_fd' is the last valid entry
File descriptor sets for select(). These are automatically managed
by the scheduler and should not be touched otherwise.
- void *wakeup
+ void *internal
- System specific wakeup context. On multi-threaded environments the
- scheduler needs to be wakenup (in the thread) when tasks are added
- or removed. This is initialized by silc_schedule_wakeup_init.
+ System specific scheduler context.
SILC_MUTEX_DEFINE(lock)
Scheduler lock.
+ bool signal_tasks
+
+   TRUE when tasks have been registered from signals. Next round in
+ scheduler will call the callbacks when this is TRUE.
+
*/
struct SilcScheduleStruct {
+ void *app_context; /* Application specific context */
SilcTaskQueue fd_queue;
SilcTaskQueue timeout_queue;
SilcTaskQueue generic_queue;
SilcScheduleFd fd_list;
- uint32 max_fd;
- uint32 last_fd;
+ SilcUInt32 max_fd;
+ SilcUInt32 last_fd;
struct timeval *timeout;
bool valid;
- void *wakeup;
+ void *internal;
SILC_MUTEX_DEFINE(lock);
bool is_locked;
+ bool signal_tasks;
};
/* Initializes the scheduler. This returns the scheduler context that
is given as arugment usually to all silc_schedule_* functions.
The `max_tasks' indicates the number of maximum tasks that the
- scheduler can handle. */
+ scheduler can handle. The `app_context' is application specific
+ context that is delivered to task callbacks. */
-SilcSchedule silc_schedule_init(int max_tasks)
+SilcSchedule silc_schedule_init(int max_tasks, void *app_context)
{
SilcSchedule schedule;
silc_task_queue_alloc(&schedule->timeout_queue);
silc_task_queue_alloc(&schedule->generic_queue);
+ if (!max_tasks)
+ max_tasks = 200;
+
/* Initialize the scheduler */
schedule->fd_list = silc_calloc(max_tasks, sizeof(*schedule->fd_list));
schedule->max_fd = max_tasks;
schedule->timeout = NULL;
schedule->valid = TRUE;
+ schedule->app_context = app_context;
/* Allocate scheduler lock */
silc_mutex_alloc(&schedule->lock);
- /* Initialize the wakeup, for multi-threads support */
- schedule->wakeup = silc_schedule_wakeup_init(schedule);
+ /* Initialize the platform specific scheduler. */
+ schedule->internal = silc_schedule_internal_init(schedule, app_context);
return schedule;
}
if (schedule->valid == TRUE)
return FALSE;
+ /* Dispatch all timeouts before going away */
+ silc_mutex_lock(schedule->timeout_queue->lock);
+ silc_schedule_dispatch_timeout(schedule, TRUE);
+ silc_mutex_unlock(schedule->timeout_queue->lock);
+
+ /* Deliver signals before going away */
+ if (schedule->signal_tasks) {
+ SILC_SCHEDULE_UNLOCK(schedule);
+ silc_schedule_internal_signals_call(schedule->internal, schedule);
+ schedule->signal_tasks = FALSE;
+ SILC_SCHEDULE_LOCK(schedule);
+ }
+
/* Unregister all tasks */
silc_schedule_task_remove(schedule->fd_queue, SILC_ALL_TASKS);
silc_schedule_task_remove(schedule->timeout_queue, SILC_ALL_TASKS);
silc_free(schedule->fd_list);
- /* Uninit the wakeup */
- silc_schedule_wakeup_uninit(schedule->wakeup);
+ /* Uninit the platform specific scheduler. */
+ silc_schedule_internal_uninit(schedule->internal);
silc_mutex_free(schedule->lock);
+ silc_free(schedule);
return TRUE;
}
+/* Enlarges the scheduler to handle up to `max_tasks' tasks. Returns
+   FALSE if `max_tasks' is not larger than the current capacity (the
+   fd_list is never shrunk). */
+
+bool silc_schedule_reinit(SilcSchedule schedule, int max_tasks)
+{
+  SILC_SCHEDULE_LOCK(schedule);
+
+  /* Only enlarge: reject non-positive sizes and sizes that do not
+     exceed the current capacity. Cast avoids a signed/unsigned
+     comparison against SilcUInt32 max_fd. The unlock on this path
+     was previously missing and leaked the scheduler lock. */
+  if (max_tasks < 1 || (SilcUInt32)max_tasks <= schedule->max_fd) {
+    SILC_SCHEDULE_UNLOCK(schedule);
+    return FALSE;
+  }
+
+  schedule->fd_list = silc_realloc(schedule->fd_list,
+				   (sizeof(*schedule->fd_list) * max_tasks));
+
+  /* Zero the newly added entries: free slots are recognized by
+     events == 0, and silc_realloc (assumed to behave like realloc)
+     leaves new memory uninitialized. */
+  memset(&schedule->fd_list[schedule->max_fd], 0,
+	 sizeof(*schedule->fd_list) * (max_tasks - schedule->max_fd));
+
+  schedule->max_fd = max_tasks;
+  SILC_SCHEDULE_UNLOCK(schedule);
+  return TRUE;
+}
+
/* Stops the schedule even if it is not supposed to be stopped yet.
After calling this, one should call silc_schedule_uninit (after the
silc_schedule has returned). */
void silc_schedule_stop(SilcSchedule schedule)
{
SILC_LOG_DEBUG(("Stopping scheduler"));
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
schedule->valid = FALSE;
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
}
/* Executes nontimeout tasks. It then checks whether any of ther fd tasks
{
SilcTask task;
int i, last_fd = schedule->last_fd;
+ SilcUInt32 fd;
for (i = 0; i <= last_fd; i++) {
if (schedule->fd_list[i].events == 0)
continue;
+ fd = schedule->fd_list[i].fd;
+
/* First check whether this fd has task in the fd queue */
silc_mutex_lock(schedule->fd_queue->lock);
- task = silc_task_find(schedule->fd_queue, schedule->fd_list[i].fd);
+ task = silc_task_find(schedule->fd_queue, fd);
/* If the task was found then execute its callbacks. If not then
execute all generic tasks for that fd. */
/* Is the task ready for reading */
if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
silc_mutex_unlock(schedule->fd_queue->lock);
- silc_mutex_unlock(schedule->lock);
- task->callback(schedule, SILC_TASK_READ, task->fd, task->context);
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
+ task->callback(schedule, schedule->app_context,
+ SILC_TASK_READ, task->fd, task->context);
+ SILC_SCHEDULE_LOCK(schedule);
silc_mutex_lock(schedule->fd_queue->lock);
}
/* Is the task ready for writing */
if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
silc_mutex_unlock(schedule->fd_queue->lock);
- silc_mutex_unlock(schedule->lock);
- task->callback(schedule, SILC_TASK_WRITE, task->fd, task->context);
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
+ task->callback(schedule, schedule->app_context,
+ SILC_TASK_WRITE, task->fd, task->context);
+ SILC_SCHEDULE_LOCK(schedule);
silc_mutex_lock(schedule->fd_queue->lock);
}
/* Is the task ready for reading */
if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
silc_mutex_unlock(schedule->generic_queue->lock);
- silc_mutex_unlock(schedule->lock);
- task->callback(schedule, SILC_TASK_READ, schedule->fd_list[i].fd,
- task->context);
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
+ task->callback(schedule, schedule->app_context,
+ SILC_TASK_READ, fd, task->context);
+ SILC_SCHEDULE_LOCK(schedule);
silc_mutex_lock(schedule->generic_queue->lock);
}
/* Is the task ready for writing */
if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
silc_mutex_unlock(schedule->generic_queue->lock);
- silc_mutex_unlock(schedule->lock);
- task->callback(schedule, SILC_TASK_WRITE, schedule->fd_list[i].fd,
- task->context);
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
+ task->callback(schedule, schedule->app_context,
+ SILC_TASK_WRITE, fd, task->context);
+ SILC_SCHEDULE_LOCK(schedule);
silc_mutex_lock(schedule->generic_queue->lock);
}
phase. */
/* This holds the schedule->lock and the schedule->timeout_queue->lock */
-static void silc_schedule_dispatch_timeout(SilcSchedule schedule)
+static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
+ bool dispatch_all)
{
SilcTaskQueue queue = schedule->timeout_queue;
SilcTask task;
the expired tasks. */
while(1) {
/* Execute the task if the timeout has expired */
- if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
+ if (dispatch_all ||
+ silc_compare_timeval(&task->timeout, &curtime)) {
if (task->valid) {
silc_mutex_unlock(queue->lock);
- silc_mutex_unlock(schedule->lock);
- task->callback(schedule, SILC_TASK_EXPIRE, task->fd, task->context);
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
+ task->callback(schedule, schedule->app_context,
+ SILC_TASK_EXPIRE, task->fd, task->context);
+ SILC_SCHEDULE_LOCK(schedule);
silc_mutex_lock(queue->lock);
}
if (task && task->valid == TRUE) {
/* If the timeout is in past, we will run the task and all other
timeout tasks from the past. */
- if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
- silc_schedule_dispatch_timeout(schedule);
-
+ if (silc_compare_timeval(&task->timeout, &curtime)) {
+ silc_schedule_dispatch_timeout(schedule, FALSE);
+
/* The task(s) has expired and doesn't exist on the task queue
anymore. We continue with new timeout. */
queue = schedule->timeout_queue;
SILC_LOG_DEBUG(("In scheduler loop"));
if (!schedule->is_locked)
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
+
+ /* Deliver signals if any has been set to be called */
+ if (schedule->signal_tasks) {
+ SILC_SCHEDULE_UNLOCK(schedule);
+ silc_schedule_internal_signals_call(schedule->internal, schedule);
+ schedule->signal_tasks = FALSE;
+ SILC_SCHEDULE_LOCK(schedule);
+ }
/* If the task queues aren't initialized or we aren't valid anymore
we will return */
&& !schedule->generic_queue) || schedule->valid == FALSE) {
SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
if (!schedule->is_locked)
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
return FALSE;
}
schedule->timeout = &timeout;
}
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
/* This is the main select(). The program blocks here until some
of the selected file descriptors change status or the selected
ret = silc_select(schedule->fd_list, schedule->last_fd + 1,
schedule->timeout);
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
switch (ret) {
case -1:
case 0:
/* Timeout */
silc_mutex_lock(schedule->timeout_queue->lock);
- silc_schedule_dispatch_timeout(schedule);
+ silc_schedule_dispatch_timeout(schedule, FALSE);
silc_mutex_unlock(schedule->timeout_queue->lock);
break;
default:
}
if (!schedule->is_locked)
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
return TRUE;
}
return;
}
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
schedule->is_locked = TRUE;
/* Start the scheduler loop */
while (silc_schedule_one(schedule, -1))
;
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
}
/* Wakes up the scheduler. This is used only in multi-threaded
{
#ifdef SILC_THREADS
SILC_LOG_DEBUG(("Wakeup scheduler"));
- silc_mutex_lock(schedule->lock);
- silc_schedule_wakeup_internal(schedule->wakeup);
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
+ silc_schedule_internal_wakeup(schedule->internal);
+ SILC_SCHEDULE_UNLOCK(schedule);
#endif
}
+/* Returns the application specific context that was saved into the
+ scheduler in silc_schedule_init function. The context is also
+ returned to application in task callback functions, but this function
+ may be used to get it as well if needed. */
+
+void *silc_schedule_get_context(SilcSchedule schedule)
+{
+ return schedule->app_context;
+}
+
/* Add new task to the scheduler */
-SilcTask silc_schedule_task_add(SilcSchedule schedule, uint32 fd,
+SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
SilcTaskCallback callback, void *context,
long seconds, long useconds,
SilcTaskType type,
SilcTaskQueue queue;
int timeout = FALSE;
+ if (!schedule->valid)
+ return NULL;
+
SILC_LOG_DEBUG(("Registering new task, fd=%d type=%d priority=%d", fd,
type, priority));
/* Add the fd to be listened, the task found now applies to this
fd as well. */
- silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ);
+ silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ, FALSE);
return task;
}
/* If the task is non-timeout task we have to tell the scheduler that we
would like to have these tasks scheduled at some odd distant future. */
if (type != SILC_TASK_TIMEOUT)
- silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ);
+ silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ, FALSE);
silc_mutex_lock(queue->lock);
/* Remove task by fd */
-void silc_schedule_task_del_by_fd(SilcSchedule schedule, uint32 fd)
+void silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
{
- SILC_LOG_DEBUG(("Unregister task by fd"));
+ SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
silc_task_del_by_fd(schedule->timeout_queue, fd);
silc_task_del_by_fd(schedule->fd_queue, fd);
call this directly if wanted. This can be called multiple times for
one file descriptor to set different iomasks. */
-void silc_schedule_set_listen_fd(SilcSchedule schedule,
- uint32 fd, SilcTaskEvent iomask)
+void silc_schedule_set_listen_fd(SilcSchedule schedule, SilcUInt32 fd,
+ SilcTaskEvent mask, bool send_events)
{
int i;
bool found = FALSE;
- silc_mutex_lock(schedule->lock);
+ if (!schedule->valid)
+ return;
+
+ SILC_SCHEDULE_LOCK(schedule);
for (i = 0; i < schedule->max_fd; i++)
if (schedule->fd_list[i].fd == fd) {
schedule->fd_list[i].fd = fd;
- schedule->fd_list[i].events = iomask;
+ schedule->fd_list[i].events = mask;
if (i > schedule->last_fd)
schedule->last_fd = i;
found = TRUE;
+ if (send_events) {
+ schedule->fd_list[i].revents = mask;
+ silc_schedule_dispatch_nontimeout(schedule);
+ }
break;
}
for (i = 0; i < schedule->max_fd; i++)
if (schedule->fd_list[i].events == 0) {
schedule->fd_list[i].fd = fd;
- schedule->fd_list[i].events = iomask;
+ schedule->fd_list[i].events = mask;
if (i > schedule->last_fd)
schedule->last_fd = i;
+ if (send_events) {
+ schedule->fd_list[i].revents = mask;
+ silc_schedule_dispatch_nontimeout(schedule);
+ }
break;
}
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
}
/* Removes a file descriptor from listen list. */
-void silc_schedule_unset_listen_fd(SilcSchedule schedule, uint32 fd)
+void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
{
int i;
- silc_mutex_lock(schedule->lock);
+ SILC_SCHEDULE_LOCK(schedule);
+
+ SILC_LOG_DEBUG(("Unset listen fd %d", fd));
for (i = 0; i < schedule->max_fd; i++)
if (schedule->fd_list[i].fd == fd) {
break;
}
- silc_mutex_unlock(schedule->lock);
+ SILC_SCHEDULE_UNLOCK(schedule);
+}
+
+/* Register a new signal */
+
+void silc_schedule_signal_register(SilcSchedule schedule, SilcUInt32 signal,
+ SilcTaskCallback callback, void *context)
+{
+ silc_schedule_internal_signal_register(schedule->internal, signal,
+ callback, context);
+}
+
+/* Unregister a signal */
+
+void silc_schedule_signal_unregister(SilcSchedule schedule, SilcUInt32 signal,
+ SilcTaskCallback callback, void *context)
+{
+ silc_schedule_internal_signal_unregister(schedule->internal, signal,
+ callback, context);
+}
+
+/* Call signal indicated by `signal'. */
+
+void silc_schedule_signal_call(SilcSchedule schedule, SilcUInt32 signal)
+{
+ /* Mark that signals needs to be delivered later. */
+ silc_schedule_internal_signal_call(schedule->internal, signal);
+ schedule->signal_tasks = TRUE;
}
/* Allocates a newtask task queue into the scheduler */
static void silc_task_queue_free(SilcTaskQueue queue)
{
silc_mutex_free(queue->lock);
+ memset(queue, 'F', sizeof(*queue));
silc_free(queue);
}
/* Return task by its fd. */
-static SilcTask silc_task_find(SilcTaskQueue queue, uint32 fd)
+static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd)
{
SilcTask next;
if (first == prev)
break;
- if (silc_schedule_task_timeout_compare(&prev->timeout, &task->timeout))
+ if (silc_compare_timeval(&prev->timeout, &task->timeout))
task = prev;
prev = prev->prev;
/* If we have longer timeout than with the task head of us
we have found our spot. */
- if (silc_schedule_task_timeout_compare(&prev->timeout,
- &newtask->timeout))
+ if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
break;
/* If we are equal size of timeout we will be after it. */
- if (!silc_schedule_task_timeout_compare(&newtask->timeout,
- &prev->timeout))
+ if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
break;
/* We have shorter timeout, compare to next one. */
if (prev == task) {
/* Check if we are going to be the first task in the queue */
- if (silc_schedule_task_timeout_compare(&prev->timeout,
- &newtask->timeout))
+ if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
break;
- if (!silc_schedule_task_timeout_compare(&newtask->timeout,
- &prev->timeout))
+ if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
break;
/* We are now the first task in queue */
/* If we have longer timeout than with the task head of us
we have found our spot. */
- if (silc_schedule_task_timeout_compare(&prev->timeout,
- &newtask->timeout))
+ if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
break;
/* If we are equal size of timeout, priority kicks in place. */
- if (!silc_schedule_task_timeout_compare(&newtask->timeout,
- &prev->timeout))
+ if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
if (prev->priority >= SILC_TASK_PRI_NORMAL)
break;
if (prev == task) {
/* Check if we are going to be the first task in the queue */
- if (silc_schedule_task_timeout_compare(&prev->timeout,
- &newtask->timeout))
+ if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
break;
- if (!silc_schedule_task_timeout_compare(&newtask->timeout,
- &prev->timeout))
+ if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
if (prev->priority >= SILC_TASK_PRI_NORMAL)
break;
next = first;
while(1) {
- next = next->next;
- silc_free(next->prev);
- if (next == first)
+ old = next->next;
+ silc_free(next);
+ if (old == first)
break;
+ next = old;
}
queue->task = NULL;
}
}
-/* Compare two time values. If the first argument is smaller than the
- second this function returns TRUE. */
-
-static int silc_schedule_task_timeout_compare(struct timeval *smaller,
- struct timeval *bigger)
-{
- if ((smaller->tv_sec < bigger->tv_sec) ||
- ((smaller->tv_sec == bigger->tv_sec) &&
- (smaller->tv_usec < bigger->tv_usec)))
- return TRUE;
-
- return FALSE;
-}
-
-static void silc_task_del_by_fd(SilcTaskQueue queue, uint32 fd)
+static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd)
{
SilcTask next;