5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 1998 - 2001 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
22 #include "silcincludes.h"
23 #include "silcschedule_i.h"
25 /* Forward declarations */
26 typedef struct SilcTaskQueueStruct *SilcTaskQueue;
28 /* System specific routines. Implemented under unix/, win32/ and such. */
30 /* System specific select(). Returns same values as normal select(). */
31 int silc_select(SilcScheduleFd fds, SilcUInt32 fds_count,
32 struct timeval *timeout);
34 /* Initializes the platform specific scheduler. This for example initializes
35 the wakeup mechanism of the scheduler. In multi-threaded environment
36 the scheduler needs to be woken up when tasks are added or removed from
37 the task queues. Returns context to the platform specific scheduler. */
38 void *silc_schedule_internal_init(SilcSchedule schedule);
40 /* Uninitializes the platform specific scheduler context. */
41 void silc_schedule_internal_uninit(void *context);
43 /* Wakes up the scheduler. This is platform specific routine */
44 void silc_schedule_internal_wakeup(void *context);
47 void silc_schedule_internal_signal_register(void *context,
50 /* Unregister signal */
51 void silc_schedule_internal_signal_unregister(void *context,
54 /* Block registered signals in scheduler. */
55 void silc_schedule_internal_signals_block(void *context);
57 /* Unblock registered signals in schedule. */
58 void silc_schedule_internal_signals_unblock(void *context);
60 /* Internal task management routines. */
62 static void silc_task_queue_alloc(SilcTaskQueue *queue);
63 static void silc_task_queue_free(SilcTaskQueue queue);
64 static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd);
65 static SilcTask silc_task_add(SilcTaskQueue queue, SilcTask newtask,
66 SilcTaskPriority priority);
67 static SilcTask silc_task_get_first(SilcTaskQueue queue, SilcTask first);
68 static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
69 SilcTaskPriority priority);
70 static int silc_schedule_task_remove(SilcTaskQueue queue, SilcTask task);
71 static int silc_schedule_task_timeout_compare(struct timeval *smaller,
72 struct timeval *bigger);
73 static void silc_task_del_by_context(SilcTaskQueue queue, void *context);
74 static void silc_task_del_by_callback(SilcTaskQueue queue,
75 SilcTaskCallback callback);
76 static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd);
78 /* Returns the task queue by task type */
/* NOTE(review): this macro expects a variable named `schedule' to be in
   scope at every expansion site, and `type' is expanded unparenthesized —
   callers pass a plain identifier so this is benign, but confirm. */
79 #define SILC_SCHEDULE_GET_QUEUE(type) \
80 (type == SILC_TASK_FD ? schedule->fd_queue : \
81 type == SILC_TASK_TIMEOUT ? schedule->timeout_queue : \
82 schedule->generic_queue)
84 /* Locks. These also blocks signals that we care about and thus guarantee
85 that while we are in scheduler no signals can happen. This way we can
86 synchronise signals with SILC Scheduler. */
/* NOTE(review): the do { ... } while(0) wrapper lines of both lock macros
   are missing from this excerpt — only the body statements survive.
   Signals are blocked before taking the mutex, and unblocked only after
   releasing it, so a signal can never observe the scheduler mid-update. */
87 #define SILC_SCHEDULE_LOCK(schedule) \
89 silc_schedule_internal_signals_block(schedule->internal); \
90 silc_mutex_lock(schedule->lock); \
92 #define SILC_SCHEDULE_UNLOCK(schedule) \
94 silc_mutex_unlock(schedule->lock); \
95 silc_schedule_internal_signals_unblock(schedule->internal); \
98 /* SILC Task object. Represents one task in the scheduler. */
99 struct SilcTaskStruct {
/* NOTE(review): several members and the closing brace are missing from
   this excerpt; code below also reads task->fd, task->context,
   task->valid and task->type — confirm against the full source. */
101 struct timeval timeout; /* Absolute expiry time of a timeout task */
102 SilcTaskCallback callback; /* Function invoked when the task fires */
105 SilcTaskPriority priority; /* Ordering priority within its queue */
108 /* Pointers forming doubly linked circular list */
109 struct SilcTaskStruct *next;
110 struct SilcTaskStruct *prev;
113 /* SILC Task Queue object. The queue holds all the tasks in the scheduler.
114 There are always three task queues in the scheduler. One for non-timeout
115 tasks (fd tasks performing tasks over specified file descriptor),
116 one for timeout tasks and one for generic tasks. */
117 struct SilcTaskQueueStruct {
/* NOTE(review): the closing brace of this struct is missing from this
   excerpt. `task' points into a circular doubly linked list (see the
   silc_task_add* routines below). */
118 SilcTask task; /* Pointer to all tasks */
119 struct timeval timeout; /* Current timeout */
120 SILC_MUTEX_DEFINE(lock); /* Queue's lock */
124 SILC Scheduler structure.
126 This is the actual schedule object in SILC. Both SILC client and server
127 uses this same scheduler. Actually, this scheduler could be used by any
128 program needing scheduling.
130 Following short description of the fields:
132 SilcTaskQueue fd_queue
134 Task queue hook for non-timeout tasks. Usually this means that these
135 tasks perform different kind of I/O on file descriptors. File
136 descriptors are usually network sockets but they actually can be
137 any file descriptors. This hook is initialized in silc_schedule_init
138 function. Timeout tasks should not be added to this queue because
139 they will never expire.
141 SilcTaskQueue timeout_queue
143 Task queue hook for timeout tasks. This hook is reserved specifically
144 for tasks with timeout. Non-timeout tasks should not be added to this
145 queue because they will never get scheduled. This hook is also
146 initialized in silc_schedule_init function.
148 SilcTaskQueue generic_queue
150 Task queue hook for generic tasks. This hook is reserved specifically
151 for generic tasks, tasks that apply to all file descriptors, except
152 to those that have specifically registered a non-timeout task. This hook
153 is also initialized in silc_schedule_init function.
155 SilcScheduleFd fd_list
157 List of file descriptors the scheduler is supposed to be listening.
158 This is updated internally.
163 Size of the fd_list list. There can be `max_fd' many tasks in
164 the scheduler at once. The `last_fd' is the last valid entry
167 struct timeval *timeout;
169 Pointer to the schedules next timeout. Value of this timeout is
170 automatically updated in the silc_schedule function.
174 Marks validity of the scheduler. This is a boolean value. When this
175 is false the scheduler is terminated and the program will end. This
176 set to true when the scheduler is initialized with silc_schedule_init
182 File descriptor sets for select(). These are automatically managed
183 by the scheduler and should not be touched otherwise.
187 System specific scheduler context.
189 SILC_MUTEX_DEFINE(lock)
/* The scheduler object itself; each field is documented in the long
   comment block above. NOTE(review): several members (max_fd, last_fd,
   valid, the select() fd sets, the platform `internal' context,
   is_locked) and the closing brace are missing from this excerpt. */
194 struct SilcScheduleStruct {
195 SilcTaskQueue fd_queue; /* Non-timeout (fd) tasks */
196 SilcTaskQueue timeout_queue; /* Timeout tasks */
197 SilcTaskQueue generic_queue; /* Generic (all-fd) tasks */
198 SilcScheduleFd fd_list; /* fds passed to silc_select() */
201 struct timeval *timeout; /* Next select() timeout, or NULL */
204 SILC_MUTEX_DEFINE(lock); /* Scheduler lock */
208 /* Initializes the scheduler. This returns the scheduler context that
209 is given as argument usually to all silc_schedule_* functions.
210 The `max_tasks' indicates the number of maximum tasks that the
211 scheduler can handle. */
213 SilcSchedule silc_schedule_init(int max_tasks)
215 SilcSchedule schedule;
217 SILC_LOG_DEBUG(("Initializing scheduler"));
/* NOTE(review): allocation results are used unchecked below — presumably
   silc_calloc aborts on OOM; confirm against the memory allocator. */
219 schedule = silc_calloc(1, sizeof(*schedule));
221 /* Allocate three task queues, one for file descriptor based tasks,
222 one for timeout tasks and one for generic tasks. */
223 silc_task_queue_alloc(&schedule->fd_queue);
224 silc_task_queue_alloc(&schedule->timeout_queue);
225 silc_task_queue_alloc(&schedule->generic_queue);
230 /* Initialize the scheduler */
231 schedule->fd_list = silc_calloc(max_tasks, sizeof(*schedule->fd_list));
232 schedule->max_fd = max_tasks;
233 schedule->timeout = NULL;
234 schedule->valid = TRUE;
236 /* Allocate scheduler lock */
237 silc_mutex_alloc(&schedule->lock);
239 /* Initialize the platform specific scheduler. */
240 schedule->internal = silc_schedule_internal_init(schedule);
/* SIGALRM is synchronised with the scheduler so it cannot interrupt
   dispatch (see the SILC_SCHEDULE_LOCK signal-blocking macros above). */
241 silc_schedule_signal_register(schedule, SIGALRM);
246 /* Uninitializes the schedule. This is called when the program is ready
247 to end. This removes all tasks and task queues. Returns FALSE if the
248 scheduler could not be uninitialized. This happens when the scheduler
249 is still valid and silc_schedule_stop has not been called. */
251 bool silc_schedule_uninit(SilcSchedule schedule)
253 SILC_LOG_DEBUG(("Uninitializing scheduler"));
/* Refuse to uninit while still running; NOTE(review): the early return
   for this branch is not visible in this excerpt. */
255 if (schedule->valid == TRUE)
258 /* Unregister all tasks */
259 silc_schedule_task_remove(schedule->fd_queue, SILC_ALL_TASKS);
260 silc_schedule_task_remove(schedule->timeout_queue, SILC_ALL_TASKS);
261 silc_schedule_task_remove(schedule->generic_queue, SILC_ALL_TASKS);
263 /* Unregister all task queues */
264 silc_task_queue_free(schedule->fd_queue);
265 silc_task_queue_free(schedule->timeout_queue);
266 silc_task_queue_free(schedule->generic_queue);
268 silc_free(schedule->fd_list);
270 /* Uninit the platform specific scheduler. */
271 silc_schedule_internal_uninit(schedule->internal);
273 silc_mutex_free(schedule->lock);
/* NOTE(review): `schedule' itself is not freed in the visible lines —
   confirm whether missing lines (or the caller) release it. */
278 /* Enlarge the capabilities of the scheduler to handle tasks to `max_tasks'. */
280 bool silc_schedule_reinit(SilcSchedule schedule, int max_tasks)
282 SILC_SCHEDULE_LOCK(schedule);
/* Grow-only: proceed when the new capacity is at least the current one.
   NOTE(review): the rejection branch/return is not visible here. */
283 if (schedule->max_fd <= max_tasks)
/* NOTE(review): `p = silc_realloc(p, ...)' loses the old block if
   realloc can fail — acceptable only if silc_realloc aborts on OOM. */
285 schedule->fd_list = silc_realloc(schedule->fd_list,
286 (sizeof(*schedule->fd_list) * max_tasks));
287 schedule->max_fd = max_tasks;
288 SILC_SCHEDULE_UNLOCK(schedule);
292 /* Stops the schedule even if it is not supposed to be stopped yet.
293 After calling this, one should call silc_schedule_uninit (after the
294 silc_schedule has returned). */
296 void silc_schedule_stop(SilcSchedule schedule)
298 SILC_LOG_DEBUG(("Stopping scheduler"));
299 SILC_SCHEDULE_LOCK(schedule);
300 schedule->valid = FALSE;
301 SILC_SCHEDULE_UNLOCK(schedule);
304 /* Executes nontimeout tasks. It then checks whether any of the fd tasks
305 was signaled by the silc_select. If some task was not signaled then
306 all generic tasks are executed for that task. The generic tasks are
307 never executed for task that has explicit fd task set. */
308 /* This holds the schedule->lock and the queue locks. */
310 static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
/* Snapshot last_fd so fds registered by callbacks during this pass do
   not extend the current dispatch round. */
313 int i, last_fd = schedule->last_fd;
316 for (i = 0; i <= last_fd; i++) {
/* Skip slots with no registered interest. NOTE(review): the `continue'
   (and several other statements/braces) are missing from this excerpt. */
317 if (schedule->fd_list[i].events == 0)
320 fd = schedule->fd_list[i].fd;
322 /* First check whether this fd has task in the fd queue */
323 silc_mutex_lock(schedule->fd_queue->lock);
324 task = silc_task_find(schedule->fd_queue, fd);
326 /* If the task was found then execute its callbacks. If not then
327 execute all generic tasks for that fd. */
329 /* Validity of the task is checked always before and after
330 execution because the task might have been unregistered
331 in the callback function, ie. it is not valid anymore. */
333 /* Is the task ready for reading */
/* Both the queue lock and the scheduler lock are dropped around the
   callback so it may add/remove tasks without deadlocking. */
334 if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
335 silc_mutex_unlock(schedule->fd_queue->lock);
336 SILC_SCHEDULE_UNLOCK(schedule);
337 task->callback(schedule, SILC_TASK_READ, task->fd, task->context);
338 SILC_SCHEDULE_LOCK(schedule);
339 silc_mutex_lock(schedule->fd_queue->lock);
342 /* Is the task ready for writing */
343 if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
344 silc_mutex_unlock(schedule->fd_queue->lock);
345 SILC_SCHEDULE_UNLOCK(schedule);
346 task->callback(schedule, SILC_TASK_WRITE, task->fd, task->context);
347 SILC_SCHEDULE_LOCK(schedule);
348 silc_mutex_lock(schedule->fd_queue->lock);
/* Task was invalidated by its own callback: remove it now. */
352 silc_schedule_task_remove(schedule->fd_queue, task);
354 silc_mutex_unlock(schedule->fd_queue->lock);
356 /* Run generic tasks for this fd. */
358 silc_mutex_unlock(schedule->fd_queue->lock);
360 silc_mutex_lock(schedule->generic_queue->lock);
361 if (!schedule->generic_queue->task) {
362 silc_mutex_unlock(schedule->generic_queue->lock);
366 task = schedule->generic_queue->task;
368 /* Validity of the task is checked always before and after
369 execution because the task might have been unregistered
370 in the callback function, ie. it is not valid anymore. */
372 /* Is the task ready for reading */
373 if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
374 silc_mutex_unlock(schedule->generic_queue->lock);
375 SILC_SCHEDULE_UNLOCK(schedule);
/* Generic tasks receive the polled fd, not a task-private one. */
376 task->callback(schedule, SILC_TASK_READ, fd, task->context);
377 SILC_SCHEDULE_LOCK(schedule);
378 silc_mutex_lock(schedule->generic_queue->lock);
381 /* Is the task ready for writing */
382 if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
383 silc_mutex_unlock(schedule->generic_queue->lock);
384 SILC_SCHEDULE_UNLOCK(schedule);
385 task->callback(schedule, SILC_TASK_WRITE, fd, task->context);
386 SILC_SCHEDULE_LOCK(schedule);
387 silc_mutex_lock(schedule->generic_queue->lock);
391 /* Invalid (unregistered) tasks are removed from the queue. */
393 if (schedule->generic_queue->task == task->next) {
394 silc_schedule_task_remove(schedule->generic_queue, task);
395 silc_mutex_unlock(schedule->generic_queue->lock);
400 silc_schedule_task_remove(schedule->generic_queue, task);
404 /* Break if there isn't more tasks in the queue */
405 if (schedule->generic_queue->task == task->next)
411 silc_mutex_unlock(schedule->generic_queue->lock);
416 /* Executes all tasks whose timeout has expired. The task is removed from
417 the task queue after the callback function has returned. Also, invalid
418 tasks are removed here. We don't have to care about priorities because
419 tasks are already sorted in their priority order at the registration
time. */
421 /* This holds the schedule->lock and the schedule->timeout_queue->lock */
423 static void silc_schedule_dispatch_timeout(SilcSchedule schedule)
425 SilcTaskQueue queue = schedule->timeout_queue;
427 struct timeval curtime;
429 SILC_LOG_DEBUG(("Running timeout tasks"));
431 silc_gettimeofday(&curtime);
433 queue = schedule->timeout_queue;
434 if (queue && queue->task) {
437 /* Walk through all tasks in the particular task queue and run all
438 the expired tasks. */
440 /* Execute the task if the timeout has expired */
441 if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
/* Drop both locks for the callback's duration — same pattern as the
   fd dispatch above. */
443 silc_mutex_unlock(queue->lock);
444 SILC_SCHEDULE_UNLOCK(schedule);
445 task->callback(schedule, SILC_TASK_EXPIRE, task->fd, task->context);
446 SILC_SCHEDULE_LOCK(schedule);
447 silc_mutex_lock(queue->lock);
450 /* Break if there isn't more tasks in the queue */
451 if (queue->task == task->next) {
452 silc_schedule_task_remove(queue, task);
458 /* Remove the task from queue */
459 silc_schedule_task_remove(queue, task->prev);
461 /* The timeout hasn't expired, check for next one */
463 /* Break if there isn't more tasks in the queue */
464 if (queue->task == task->next)
473 /* Calculates next timeout for select(). This is the timeout value
474 when at earliest some of the timeout tasks expire. If this is in the
475 past, they will be run now. */
476 /* This holds the schedule->lock and the schedule->timeout_queue->lock */
478 static void silc_schedule_select_timeout(SilcSchedule schedule)
480 SilcTaskQueue queue = schedule->timeout_queue;
482 struct timeval curtime;
484 /* Get the current time */
485 silc_gettimeofday(&curtime);
486 schedule->timeout = NULL;
488 /* First task in the task queue has always the smallest timeout. */
491 if (task && task->valid == TRUE) {
492 /* If the timeout is in past, we will run the task and all other
493 timeout tasks from the past. */
494 if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
495 silc_schedule_dispatch_timeout(schedule);
497 /* The task(s) has expired and doesn't exist on the task queue
498 anymore. We continue with new timeout. */
499 queue = schedule->timeout_queue;
501 if (task == NULL || task->valid == FALSE)
505 /* Calculate the next timeout for select() */
506 queue->timeout.tv_sec = task->timeout.tv_sec - curtime.tv_sec;
507 queue->timeout.tv_usec = task->timeout.tv_usec - curtime.tv_usec;
/* Clamp to zero rather than passing a negative timeout to select(). */
508 if (queue->timeout.tv_sec < 0)
509 queue->timeout.tv_sec = 0;
511 /* We wouldn't want to go under zero, check for it. */
512 if (queue->timeout.tv_usec < 0) {
513 queue->timeout.tv_sec -= 1;
514 if (queue->timeout.tv_sec < 0)
515 queue->timeout.tv_sec = 0;
516 queue->timeout.tv_usec += 1000000L;
519 /* We've got the timeout value */
522 /* Task is not valid, remove it and try next one. */
523 silc_schedule_task_remove(queue, task);
525 if (queue->task == NULL)
530 /* Save the timeout */
/* Points into queue->timeout, which stays valid until the next pass. */
532 schedule->timeout = &queue->timeout;
533 SILC_LOG_DEBUG(("timeout: sec=%d, usec=%d", schedule->timeout->tv_sec,
534 schedule->timeout->tv_usec));
538 /* Runs the scheduler once and then returns. */
540 bool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
542 struct timeval timeout;
545 SILC_LOG_DEBUG(("In scheduler loop"));
/* When called from silc_schedule() the lock is already held
   (is_locked is TRUE); only lock for direct one-shot callers. */
547 if (!schedule->is_locked)
548 SILC_SCHEDULE_LOCK(schedule);
550 /* If the task queues aren't initialized or we aren't valid anymore, exit. */
552 if ((!schedule->fd_queue && !schedule->timeout_queue
553 && !schedule->generic_queue) || schedule->valid == FALSE) {
554 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
555 if (!schedule->is_locked)
556 SILC_SCHEDULE_UNLOCK(schedule);
560 /* Calculate next timeout for silc_select(). This is the timeout value
561 when at earliest some of the timeout tasks expire. */
562 silc_mutex_lock(schedule->timeout_queue->lock);
563 silc_schedule_select_timeout(schedule);
564 silc_mutex_unlock(schedule->timeout_queue->lock);
/* Caller-supplied timeout overrides the computed one.
   NOTE(review): the timeout.tv_sec assignment is missing from this
   excerpt — confirm it is set alongside tv_usec. */
566 if (timeout_usecs >= 0) {
568 timeout.tv_usec = timeout_usecs;
569 schedule->timeout = &timeout;
/* The scheduler lock is released across the blocking select(). */
572 SILC_SCHEDULE_UNLOCK(schedule);
574 /* This is the main select(). The program blocks here until some
575 of the selected file descriptors change status or the selected timeout expires. */
577 SILC_LOG_DEBUG(("Select"));
578 ret = silc_select(schedule->fd_list, schedule->last_fd + 1,
581 SILC_SCHEDULE_LOCK(schedule);
588 SILC_LOG_ERROR(("Error in select(): %s", strerror(errno)));
592 silc_mutex_lock(schedule->timeout_queue->lock);
593 silc_schedule_dispatch_timeout(schedule);
594 silc_mutex_unlock(schedule->timeout_queue->lock);
597 /* There is some data available now */
598 SILC_LOG_DEBUG(("Running non-timeout tasks"));
599 silc_schedule_dispatch_nontimeout(schedule);
603 if (!schedule->is_locked)
604 SILC_SCHEDULE_UNLOCK(schedule);
609 /* The SILC scheduler. This is actually the main routine in SILC programs.
610 When this returns the program is to be ended. Before this function can
611 be called, one must call silc_schedule_init function. */
613 void silc_schedule(SilcSchedule schedule)
615 SILC_LOG_DEBUG(("Running scheduler"));
617 if (schedule->valid == FALSE) {
618 SILC_LOG_ERROR(("Scheduler is not valid, stopping"));
622 SILC_SCHEDULE_LOCK(schedule);
623 schedule->is_locked = TRUE;
625 /* Start the scheduler loop */
626 while (silc_schedule_one(schedule, -1))
629 SILC_SCHEDULE_UNLOCK(schedule);
632 /* Wakes up the scheduler. This is used only in multi-threaded
633 environments where threads may add new tasks or remove old tasks
634 from task queues. This is called to wake up the scheduler in the
635 main thread so that it detects the changes in the task queues.
636 If threads support is not compiled in this function has no effect.
637 Implementation of this function is platform specific. */
639 void silc_schedule_wakeup(SilcSchedule schedule)
642 SILC_LOG_DEBUG(("Wakeup scheduler"));
643 SILC_SCHEDULE_LOCK(schedule);
644 silc_schedule_internal_wakeup(schedule->internal);
645 SILC_SCHEDULE_UNLOCK(schedule);
649 /* Add new task to the scheduler */
651 SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
652 SilcTaskCallback callback, void *context,
653 long seconds, long useconds,
655 SilcTaskPriority priority)
661 SILC_LOG_DEBUG(("Registering new task, fd=%d type=%d priority=%d", fd,
664 queue = SILC_SCHEDULE_GET_QUEUE(type);
666 /* If the task is generic task, we check whether this task has already
667 been registered. Generic tasks are registered only once and after that
668 the same task applies to all file descriptors to be registered. */
669 if (type == SILC_TASK_GENERIC) {
670 silc_mutex_lock(queue->lock);
673 SilcTask task = queue->task;
/* Dedup key is the (callback, context) pair. */
675 if ((task->callback == callback) && (task->context == context)) {
676 SILC_LOG_DEBUG(("Found matching generic task, using the match"));
678 silc_mutex_unlock(queue->lock);
680 /* Add the fd to be listened, the task found now applies to this fd. */
682 silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ);
/* Circular list: stop once we are back at the head. */
686 if (queue->task == task->next)
693 silc_mutex_unlock(queue->lock);
696 newtask = silc_calloc(1, sizeof(*newtask));
/* NOTE(review): fields such as newtask->fd and the timeout seconds
   assignments are missing from this excerpt. */
698 newtask->context = context;
699 newtask->callback = callback;
700 newtask->valid = TRUE;
701 newtask->priority = priority;
702 newtask->type = type;
/* Self-linked: a one-element circular list until inserted below. */
703 newtask->next = newtask;
704 newtask->prev = newtask;
706 /* Create timeout if marked to be timeout task */
707 if (((seconds + useconds) > 0) && (type == SILC_TASK_TIMEOUT)) {
708 silc_gettimeofday(&newtask->timeout);
709 newtask->timeout.tv_sec += seconds + (useconds / 1000000L);
710 newtask->timeout.tv_usec += (useconds % 1000000L);
/* Normalize tv_usec into [0, 999999]. */
711 if (newtask->timeout.tv_usec > 999999L) {
712 newtask->timeout.tv_sec += 1;
713 newtask->timeout.tv_usec -= 1000000L;
718 /* If the task is non-timeout task we have to tell the scheduler that we
719 would like to have these tasks scheduled at some odd distant future. */
720 if (type != SILC_TASK_TIMEOUT)
721 silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ);
723 silc_mutex_lock(queue->lock);
725 /* Is this first task of the queue? */
726 if (queue->task == NULL) {
727 queue->task = newtask;
728 silc_mutex_unlock(queue->lock);
/* Otherwise insert in sorted position by timeout/priority. */
733 newtask = silc_task_add_timeout(queue, newtask, priority);
735 newtask = silc_task_add(queue, newtask, priority);
737 silc_mutex_unlock(queue->lock);
742 /* Removes a task from the scheduler */
744 void silc_schedule_task_del(SilcSchedule schedule, SilcTask task)
/* NOTE(review): this dereferences task->type before the SILC_ALL_TASKS
   check; that works only if SILC_ALL_TASKS is not actually dereferenced
   by SILC_SCHEDULE_GET_QUEUE — confirm the macro/constant definitions. */
746 SilcTaskQueue queue = SILC_SCHEDULE_GET_QUEUE(task->type);
748 /* Unregister all tasks */
749 if (task == SILC_ALL_TASKS) {
751 SILC_LOG_DEBUG(("Unregistering all tasks at once"));
753 silc_mutex_lock(queue->lock);
756 silc_mutex_unlock(queue->lock);
/* Tasks are only marked invalid here; the dispatcher frees them.
   Circular list: stop once back at the head. */
765 if (queue->task == next->next)
770 silc_mutex_unlock(queue->lock);
774 SILC_LOG_DEBUG(("Unregistering task"));
776 silc_mutex_lock(queue->lock);
778 /* Unregister the specific task */
782 silc_mutex_unlock(queue->lock);
785 /* Remove task by fd */
787 void silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
789 SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
791 silc_task_del_by_fd(schedule->timeout_queue, fd);
792 silc_task_del_by_fd(schedule->fd_queue, fd);
795 /* Remove task by task callback. */
797 void silc_schedule_task_del_by_callback(SilcSchedule schedule,
798 SilcTaskCallback callback)
800 SILC_LOG_DEBUG(("Unregister task by callback"));
802 silc_task_del_by_callback(schedule->timeout_queue, callback);
803 silc_task_del_by_callback(schedule->fd_queue, callback);
804 silc_task_del_by_callback(schedule->generic_queue, callback);
807 /* Remove task by context. */
809 void silc_schedule_task_del_by_context(SilcSchedule schedule, void *context)
811 SILC_LOG_DEBUG(("Unregister task by context"));
813 silc_task_del_by_context(schedule->timeout_queue, context);
814 silc_task_del_by_context(schedule->fd_queue, context);
815 silc_task_del_by_context(schedule->generic_queue, context);
818 /* Sets a file descriptor to be listened by select() in scheduler. One can
819 call this directly if wanted. This can be called multiple times for
820 one file descriptor to set different iomasks. */
822 void silc_schedule_set_listen_fd(SilcSchedule schedule,
823 SilcUInt32 fd, SilcTaskEvent iomask)
828 SILC_SCHEDULE_LOCK(schedule);
/* First pass: fd already present — update its event mask in place.
   NOTE(review): the loop-exit (break/goto) after a hit is missing from
   this excerpt, as is the handling when no free slot exists. */
830 for (i = 0; i < schedule->max_fd; i++)
831 if (schedule->fd_list[i].fd == fd) {
832 schedule->fd_list[i].fd = fd;
833 schedule->fd_list[i].events = iomask;
834 if (i > schedule->last_fd)
835 schedule->last_fd = i;
/* Second pass: claim the first unused slot (events == 0). */
841 for (i = 0; i < schedule->max_fd; i++)
842 if (schedule->fd_list[i].events == 0) {
843 schedule->fd_list[i].fd = fd;
844 schedule->fd_list[i].events = iomask;
845 if (i > schedule->last_fd)
846 schedule->last_fd = i;
850 SILC_SCHEDULE_UNLOCK(schedule);
853 /* Removes a file descriptor from listen list. */
855 void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
859 SILC_SCHEDULE_LOCK(schedule);
861 SILC_LOG_DEBUG(("Unset listen fd %d", fd));
863 for (i = 0; i < schedule->max_fd; i++)
864 if (schedule->fd_list[i].fd == fd) {
865 schedule->fd_list[i].fd = 0;
866 schedule->fd_list[i].events = 0;
867 if (schedule->last_fd == i)
868 schedule->last_fd = schedule->max_fd - 1;
872 SILC_SCHEDULE_UNLOCK(schedule);
875 /* Register a new signal */
877 void silc_schedule_signal_register(SilcSchedule schedule, SilcUInt32 signal)
879 silc_schedule_internal_signal_register(schedule->internal, signal);
882 /* Unregister a new signal */
884 void silc_schedule_signal_unregister(SilcSchedule schedule, SilcUInt32 signal)
886 silc_schedule_internal_signal_unregister(schedule->internal, signal);
889 /* Allocates a newtask task queue into the scheduler */
891 static void silc_task_queue_alloc(SilcTaskQueue *queue)
893 *queue = silc_calloc(1, sizeof(**queue));
894 silc_mutex_alloc(&(*queue)->lock);
897 /* Free's a task queue. */
899 static void silc_task_queue_free(SilcTaskQueue queue)
901 silc_mutex_free(queue->lock);
905 /* Return task by its fd. */
907 static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd)
/* NOTE(review): almost the entire body of this function is missing from
   this excerpt; only the circular-list termination test survives.
   Presumably it walks queue->task comparing each task's fd — confirm. */
919 if (queue->task == next->next)
927 /* Adds a non-timeout task into the task queue. This function is used
928 by silc_task_register function. Returns a pointer to the registered
task. */
931 static SilcTask silc_task_add(SilcTaskQueue queue, SilcTask newtask,
932 SilcTaskPriority priority)
934 SilcTask task, next, prev;
936 /* Take the first task in the queue */
/* NOTE(review): the switch statement, SILC_TASK_PRI_HIGH/default cases,
   breaks and the return are missing from this excerpt. */
940 case SILC_TASK_PRI_LOW:
941 /* Lowest priority. The task is added at the end of the list. */
/* Splice newtask between the tail (prev) and the head (task). */
943 newtask->prev = prev;
944 newtask->next = task;
945 prev->next = newtask;
946 task->prev = newtask;
948 case SILC_TASK_PRI_NORMAL:
949 /* Normal priority. The task is added before lower priority tasks
950 but after tasks with higher priority. */
/* Scan backwards from the tail for the first non-LOW task. */
952 while(prev != task) {
953 if (prev->priority > SILC_TASK_PRI_LOW)
958 /* There are only lower priorities in the list, we will
959 sit before them and become the first task in the queue. */
961 newtask->prev = prev;
962 newtask->next = task;
963 task->prev = newtask;
964 prev->next = newtask;
966 /* We are now the first task in queue */
967 queue->task = newtask;
969 /* Found a spot from the list, add the task to the list. */
971 newtask->prev = prev;
972 newtask->next = next;
973 prev->next = newtask;
974 next->prev = newtask;
985 /* Return the timeout task with smallest timeout. */
987 static SilcTask silc_task_get_first(SilcTaskQueue queue, SilcTask first)
/* NOTE(review): the walk over the circular list and the return are
   missing from this excerpt; only the per-element comparison survives. */
1001 if (silc_schedule_task_timeout_compare(&prev->timeout, &task->timeout))
1010 /* Adds a timeout task into the task queue. This function is used by
1011 silc_task_register function. Returns a pointer to the registered
1012 task. Timeout tasks are sorted by their timeout value in ascending
1013 order. The priority matters if there are more than one task with
the same timeout. */
1016 static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
1017 SilcTaskPriority priority)
1019 SilcTask task, prev, next;
1021 /* Take the first task in the queue */
1024 /* Take last task from the list */
/* NOTE(review): the switch statement, HIGH/default priority cases,
   breaks and the return are missing from this excerpt. */
1028 case SILC_TASK_PRI_LOW:
1029 /* Lowest priority. The task is added at the end of the list. */
/* Scan backwards from the tail toward the head. */
1030 while(prev != task) {
1032 /* If we have longer timeout than with the task head of us
1033 we have found our spot. */
1034 if (silc_schedule_task_timeout_compare(&prev->timeout,
1038 /* If we are equal size of timeout we will be after it. */
1039 if (!silc_schedule_task_timeout_compare(&newtask->timeout,
1043 /* We have shorter timeout, compare to next one. */
1046 /* Found a spot from the list, add the task to the list. */
1048 newtask->prev = prev;
1049 newtask->next = next;
1050 prev->next = newtask;
1051 next->prev = newtask;
1054 /* Check if we are going to be the first task in the queue */
1055 if (silc_schedule_task_timeout_compare(&prev->timeout,
1058 if (!silc_schedule_task_timeout_compare(&newtask->timeout,
1062 /* We are now the first task in queue */
1063 queue->task = newtask;
1066 case SILC_TASK_PRI_NORMAL:
1067 /* Normal priority. The task is added before lower priority tasks
1068 but after tasks with higher priority. */
1069 while(prev != task) {
1071 /* If we have longer timeout than with the task head of us
1072 we have found our spot. */
1073 if (silc_schedule_task_timeout_compare(&prev->timeout,
1077 /* If we are equal size of timeout, priority kicks in place. */
1078 if (!silc_schedule_task_timeout_compare(&newtask->timeout,
1080 if (prev->priority >= SILC_TASK_PRI_NORMAL)
1083 /* We have shorter timeout or higher priority, compare to next one. */
1086 /* Found a spot from the list, add the task to the list. */
1088 newtask->prev = prev;
1089 newtask->next = next;
1090 prev->next = newtask;
1091 next->prev = newtask;
1094 /* Check if we are going to be the first task in the queue */
1095 if (silc_schedule_task_timeout_compare(&prev->timeout,
1098 if (!silc_schedule_task_timeout_compare(&newtask->timeout,
1100 if (prev->priority >= SILC_TASK_PRI_NORMAL)
1103 /* We are now the first task in queue */
1104 queue->task = newtask;
1115 /* Removes (unregisters) a task from particular task queue. This function
1116 is used internally by scheduler. This must be called holding the queue lock. */
1119 static int silc_schedule_task_remove(SilcTaskQueue queue, SilcTask task)
1121 SilcTask first, old, next;
/* NOTE(review): the return statements, the walk loops and the unlink
   assignments are missing from this excerpt. */
1123 if (!queue || !task)
1130 first = queue->task;
1132 /* Unregister all tasks in queue */
1133 if (task == SILC_ALL_TASKS) {
1134 SILC_LOG_DEBUG(("Removing all tasks at once"));
/* Free each node while walking the circular list. */
1139 silc_free(next->prev);
1148 SILC_LOG_DEBUG(("Removing task"));
1150 /* Unregister the task */
1154 SilcTask prev, next;
/* Single-element list: removing it empties the queue. */
1161 if (prev == old && next == old)
/* If the head is removed, pick the task with the smallest timeout
   as the new head. */
1163 if (queue->task == old)
1164 queue->task = silc_task_get_first(queue, next);
/* Compare two time values. If the first argument is smaller than the
   second this function returns TRUE (non-zero), otherwise FALSE (0).
   Equal timevals compare as "not smaller". */

static int silc_schedule_task_timeout_compare(struct timeval *smaller,
					      struct timeval *bigger)
{
  /* Seconds dominate; microseconds break ties within the same second. */
  if ((smaller->tv_sec < bigger->tv_sec) ||
      ((smaller->tv_sec == bigger->tv_sec) &&
       (smaller->tv_usec < bigger->tv_usec)))
    return 1;

  return 0;
}
/* Marks all tasks in `queue' whose fd matches `fd' as invalid; the
   dispatcher frees invalid tasks later.  NOTE(review): the empty-queue
   early return, the walk loop and the fd comparison itself are missing
   from this excerpt. */
1191 static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd)
1195 silc_mutex_lock(queue->lock);
1198 silc_mutex_unlock(queue->lock);
1206 next->valid = FALSE;
/* Circular list: stop once back at the head. */
1207 if (queue->task == next->next)
1212 silc_mutex_unlock(queue->lock);
/* Marks all tasks in `queue' whose callback matches as invalid; the
   dispatcher frees invalid tasks later.  NOTE(review): the empty-queue
   early return and the walk loop are missing from this excerpt. */
1215 static void silc_task_del_by_callback(SilcTaskQueue queue,
1216 SilcTaskCallback callback)
1220 silc_mutex_lock(queue->lock);
1223 silc_mutex_unlock(queue->lock);
1230 if (next->callback == callback)
1231 next->valid = FALSE;
/* Circular list: stop once back at the head. */
1232 if (queue->task == next->next)
1237 silc_mutex_unlock(queue->lock);
/* Marks all tasks in `queue' whose context matches as invalid; the
   dispatcher frees invalid tasks later.  NOTE(review): the empty-queue
   early return and the walk loop are missing from this excerpt. */
1240 static void silc_task_del_by_context(SilcTaskQueue queue, void *context)
1244 silc_mutex_lock(queue->lock);
1247 silc_mutex_unlock(queue->lock);
1254 if (next->context == context)
1255 next->valid = FALSE;
/* Circular list: stop once back at the head. */
1256 if (queue->task == next->next)
1261 silc_mutex_unlock(queue->lock);