5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 1998 - 2001 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
22 #include "silcincludes.h"
23 #include "silcschedule_i.h"
25 /* Forward declarations */
26 typedef struct SilcTaskQueueStruct *SilcTaskQueue;
28 /* System specific routines. Implemented under unix/, win32/ and such. */
30 /* System specific select(). Returns same values as normal select(). */
31 int silc_select(SilcScheduleFd fds, SilcUInt32 fds_count,
32 struct timeval *timeout);
34 /* Initializes the platform specific scheduler. This for example initializes
35 the wakeup mechanism of the scheduler. In multi-threaded environment
36 the scheduler needs to be woken up when tasks are added or removed from
37 the task queues. Returns context to the platform specific scheduler. */
38 void *silc_schedule_internal_init(SilcSchedule schedule);
40 /* Uninitializes the platform specific scheduler context. */
41 void silc_schedule_internal_uninit(void *context);
43 /* Wakes up the scheduler. This is platform specific routine */
44 void silc_schedule_internal_wakeup(void *context);
/* Register signal.
   NOTE(review): the remainder of this parameter list is missing from this
   extraction (the line ends with a comma) -- presumably the signal number
   follows; confirm against the original source. */
47 void silc_schedule_internal_signal_register(void *context,
50 /* Unregister signal */
/* NOTE(review): parameter list truncated here as well -- confirm. */
51 void silc_schedule_internal_signal_unregister(void *context,
54 /* Block registered signals in scheduler. */
55 void silc_schedule_internal_signals_block(void *context);
57 /* Unblock registered signals in schedule. */
58 void silc_schedule_internal_signals_unblock(void *context);
60 /* Internal task management routines. */
/* Queue lifetime management. */
62 static void silc_task_queue_alloc(SilcTaskQueue *queue);
63 static void silc_task_queue_free(SilcTaskQueue queue);
/* Lookup and sorted insertion into the circular doubly linked task list. */
64 static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd);
65 static SilcTask silc_task_add(SilcTaskQueue queue, SilcTask newtask,
66 SilcTaskPriority priority);
67 static SilcTask silc_task_get_first(SilcTaskQueue queue, SilcTask first);
68 static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
69 SilcTaskPriority priority);
/* Removal and timeout ordering. */
70 static int silc_schedule_task_remove(SilcTaskQueue queue, SilcTask task);
71 static int silc_schedule_task_timeout_compare(struct timeval *smaller,
72 struct timeval *bigger);
/* Bulk invalidation of tasks matching a context, callback or fd. */
73 static void silc_task_del_by_context(SilcTaskQueue queue, void *context);
74 static void silc_task_del_by_callback(SilcTaskQueue queue,
75 SilcTaskCallback callback);
76 static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd);
78 /* Returns the task queue by task type */
/* NOTE(review): this macro evaluates `type' twice and relies on a variable
   named `schedule' being in scope at the expansion site. */
79 #define SILC_SCHEDULE_GET_QUEUE(type) \
80 (type == SILC_TASK_FD ? schedule->fd_queue : \
81 type == SILC_TASK_TIMEOUT ? schedule->timeout_queue : \
82 schedule->generic_queue)
84 /* Locks. These also blocks signals that we care about and thus guarantee
85 that while we are in scheduler no signals can happen. This way we can
86 synchronise signals with SILC Scheduler. */
/* Signals are blocked BEFORE the mutex is taken and unblocked only AFTER it
   is released, so a signal handler can never run while the scheduler lock
   is held.
   NOTE(review): the statement-wrapper lines of these two macros (e.g. a
   do { ... } while (0) frame) are not visible in this extraction --
   confirm against the original source before compiling. */
87 #define SILC_SCHEDULE_LOCK(schedule) \
89 silc_schedule_internal_signals_block(schedule->internal); \
90 silc_mutex_lock(schedule->lock); \
92 #define SILC_SCHEDULE_UNLOCK(schedule) \
94 silc_mutex_unlock(schedule->lock); \
95 silc_schedule_internal_signals_unblock(schedule->internal); \
98 /* SILC Task object. Represents one task in the scheduler. */
/* NOTE(review): several members are missing from this extraction -- code
   elsewhere in this file reads task->fd, task->context, task->valid and
   newtask->type, so fields for those must exist here, as must the struct's
   closing brace. */
99 struct SilcTaskStruct {
101 struct timeval timeout; /* Absolute expiry time (timeout tasks only) */
102 SilcTaskCallback callback; /* Function run when the task fires */
105 SilcTaskPriority priority; /* Ordering hint within the queue */
108 /* Pointers forming doubly linked circular list */
109 struct SilcTaskStruct *next;
110 struct SilcTaskStruct *prev;
113 /* SILC Task Queue object. The queue holds all the tasks in the scheduler.
114 There are always three task queues in the scheduler. One for non-timeout
115 tasks (fd tasks performing tasks over specified file descriptor),
116 one for timeout tasks and one for generic tasks. */
/* NOTE(review): the closing brace of this struct is not visible in this
   extraction. */
117 struct SilcTaskQueueStruct {
118 SilcTask task; /* Pointer to all tasks */
119 struct timeval timeout; /* Current timeout */
120 SILC_MUTEX_DEFINE(lock); /* Queue's lock */
124 SILC Scheduler structure.
126 This is the actual schedule object in SILC. Both SILC client and server
127 uses this same scheduler. Actually, this scheduler could be used by any
128 program needing scheduling.
130 Following short description of the fields:
132 SilcTaskQueue fd_queue
134 Task queue hook for non-timeout tasks. Usually this means that these
135 tasks perform different kind of I/O on file descriptors. File
136 descriptors are usually network sockets but they actually can be
137 any file descriptors. This hook is initialized in silc_schedule_init
138 function. Timeout tasks should not be added to this queue because
139 they will never expire.
141 SilcTaskQueue timeout_queue
143 Task queue hook for timeout tasks. This hook is reserved specifically
144 for tasks with timeout. Non-timeout tasks should not be added to this
145 queue because they will never get scheduled. This hook is also
146 initialized in silc_schedule_init function.
148 SilcTaskQueue generic_queue
150 Task queue hook for generic tasks. This hook is reserved specifically
151 for generic tasks, tasks that apply to all file descriptors, except
152 to those that have specifically registered a non-timeout task. This hook
153 is also initialized in silc_schedule_init function.
155 SilcScheduleFd fd_list
157 List of file descriptors the scheduler is supposed to be listening.
158 This is updated internally.
163 Size of the fd_list list. There can be `max_fd' many tasks in
164 the scheduler at once. The `last_fd' is the last valid entry
167 struct timeval *timeout;
169 Pointer to the schedules next timeout. Value of this timeout is
170 automatically updated in the silc_schedule function.
174 Marks validity of the scheduler. This is a boolean value. When this
175 is false the scheduler is terminated and the program will end. This
176 set to true when the scheduler is initialized with silc_schedule_init
182 File descriptor sets for select(). These are automatically managed
183 by the scheduler and should not be touched otherwise.
187 System specific scheduler context.
189 SILC_MUTEX_DEFINE(lock)
194 struct SilcScheduleStruct {
195 SilcTaskQueue fd_queue; /* Queue of fd (non-timeout) tasks */
196 SilcTaskQueue timeout_queue; /* Queue of timeout tasks */
197 SilcTaskQueue generic_queue; /* Queue of generic (all-fd) tasks */
198 SilcScheduleFd fd_list; /* fds handed to silc_select() */
201 struct timeval *timeout; /* Next timeout for silc_select() */
204 SILC_MUTEX_DEFINE(lock); /* Scheduler lock */
/* NOTE(review): members max_fd, last_fd, valid, is_locked and internal are
   referenced elsewhere in this file but their declarations (and the
   closing brace) are missing from this extraction. */
208 /* Initializes the scheduler. This returns the scheduler context that
209 is given as argument usually to all silc_schedule_* functions.
210 The `max_tasks' indicates the number of maximum tasks that the
211 scheduler can handle. */
213 SilcSchedule silc_schedule_init(int max_tasks)
/* Allocates and fully initializes a scheduler able to track at most
   `max_tasks' file descriptors at once.
   NOTE(review): the function braces and the final `return schedule;' are
   not visible in this extraction. */
215 SilcSchedule schedule;
217 SILC_LOG_DEBUG(("Initializing scheduler"));
219 schedule = silc_calloc(1, sizeof(*schedule));
/* NOTE(review): the silc_calloc result is used unchecked below --
   presumably the allocator aborts on failure or a check sits on a
   missing line; confirm. */
221 /* Allocate three task queues, one for file descriptor based tasks,
222 one for timeout tasks and one for generic tasks. */
223 silc_task_queue_alloc(&schedule->fd_queue);
224 silc_task_queue_alloc(&schedule->timeout_queue);
225 silc_task_queue_alloc(&schedule->generic_queue);
230 /* Initialize the scheduler */
231 schedule->fd_list = silc_calloc(max_tasks, sizeof(*schedule->fd_list));
232 schedule->max_fd = max_tasks;
233 schedule->timeout = NULL;
234 schedule->valid = TRUE;
236 /* Allocate scheduler lock */
237 silc_mutex_alloc(&schedule->lock);
239 /* Initialize the platform specific scheduler. */
240 schedule->internal = silc_schedule_internal_init(schedule);
/* Synchronise SIGALRM with the scheduler (see silc_schedule_signal_register). */
242 silc_schedule_signal_register(schedule, SIGALRM);
248 /* Uninitializes the schedule. This is called when the program is ready
249 to end. This removes all tasks and task queues. Returns FALSE if the
250 scheduler could not be uninitialized. This happens when the scheduler
251 is still valid and silc_schedule_stop has not been called. */
253 bool silc_schedule_uninit(SilcSchedule schedule)
/* Tears the scheduler down: all tasks, the three queues, the fd list, the
   platform-specific context and the lock. Per the header comment above,
   this must fail while the scheduler is still valid -- the early
   `return FALSE;' and the final `return TRUE;' lines are not visible in
   this extraction. */
255 SILC_LOG_DEBUG(("Uninitializing scheduler"));
257 if (schedule->valid == TRUE)
260 /* Unregister all tasks */
261 silc_schedule_task_remove(schedule->fd_queue, SILC_ALL_TASKS);
262 silc_schedule_task_remove(schedule->timeout_queue, SILC_ALL_TASKS);
263 silc_schedule_task_remove(schedule->generic_queue, SILC_ALL_TASKS);
265 /* Unregister all task queues */
266 silc_task_queue_free(schedule->fd_queue);
267 silc_task_queue_free(schedule->timeout_queue);
268 silc_task_queue_free(schedule->generic_queue);
270 silc_free(schedule->fd_list);
272 /* Uninit the platform specific scheduler. */
273 silc_schedule_internal_uninit(schedule->internal);
275 silc_mutex_free(schedule->lock);
280 /* Enlarge the capabilities of the scheduler to handle tasks to `max_tasks'. */
282 bool silc_schedule_reinit(SilcSchedule schedule, int max_tasks)
/* Grows fd_list so the scheduler can track up to `max_tasks' fds.
   NOTE(review): the statement guarded by the `if' below and the return
   statements are missing from this extraction -- presumably the function
   bails out when no enlargement is needed; confirm. Also note that the
   silc_realloc result overwrites fd_list directly: on allocation failure
   the old pointer would be lost -- confirm the allocator's semantics. */
284 SILC_SCHEDULE_LOCK(schedule);
285 if (schedule->max_fd <= max_tasks)
287 schedule->fd_list = silc_realloc(schedule->fd_list,
288 (sizeof(*schedule->fd_list) * max_tasks));
289 schedule->max_fd = max_tasks;
290 SILC_SCHEDULE_UNLOCK(schedule);
294 /* Stops the schedule even if it is not supposed to be stopped yet.
295 After calling this, one should call silc_schedule_uninit (after the
296 silc_schedule has returned). */
298 void silc_schedule_stop(SilcSchedule schedule)
300 SILC_LOG_DEBUG(("Stopping scheduler"));
301 SILC_SCHEDULE_LOCK(schedule);
302 schedule->valid = FALSE;
303 SILC_SCHEDULE_UNLOCK(schedule);
306 /* Executes nontimeout tasks. It then checks whether any of ther fd tasks
307 was signaled by the silc_select. If some task was not signaled then
308 all generic tasks are executed for that task. The generic tasks are
309 never executed for task that has explicit fd task set. */
310 /* This holds the schedule->lock and the queue locks. */
312 static void silc_schedule_dispatch_nontimeout(SilcSchedule schedule)
/* Dispatches READ/WRITE callbacks for every fd silc_select() flagged.
   NOTE(review): this extraction is missing many lines here (declarations
   of `fd' and `task', braces, continue/break statements), so only the
   visible statements are annotated. The recurring pattern is: drop the
   queue lock and the scheduler lock, run the callback, then re-acquire
   both -- callbacks may (un)register tasks, hence the validity re-checks
   before each dispatch. */
315 int i, last_fd = schedule->last_fd;
318 for (i = 0; i <= last_fd; i++) {
/* Skip slots nobody is listening on. */
319 if (schedule->fd_list[i].events == 0)
322 fd = schedule->fd_list[i].fd;
324 /* First check whether this fd has task in the fd queue */
325 silc_mutex_lock(schedule->fd_queue->lock);
326 task = silc_task_find(schedule->fd_queue, fd);
328 /* If the task was found then execute its callbacks. If not then
329 execute all generic tasks for that fd. */
331 /* Validity of the task is checked always before and after
332 execution because the task might have been unregistered
333 in the callback function, ie. it is not valid anymore. */
335 /* Is the task ready for reading */
336 if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
337 silc_mutex_unlock(schedule->fd_queue->lock);
338 SILC_SCHEDULE_UNLOCK(schedule);
339 task->callback(schedule, SILC_TASK_READ, task->fd, task->context);
340 SILC_SCHEDULE_LOCK(schedule);
341 silc_mutex_lock(schedule->fd_queue->lock);
344 /* Is the task ready for writing */
345 if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
346 silc_mutex_unlock(schedule->fd_queue->lock);
347 SILC_SCHEDULE_UNLOCK(schedule);
348 task->callback(schedule, SILC_TASK_WRITE, task->fd, task->context);
349 SILC_SCHEDULE_LOCK(schedule);
350 silc_mutex_lock(schedule->fd_queue->lock);
/* Task went invalid during dispatch: remove it now. */
354 silc_schedule_task_remove(schedule->fd_queue, task);
356 silc_mutex_unlock(schedule->fd_queue->lock);
358 /* Run generic tasks for this fd. */
360 silc_mutex_unlock(schedule->fd_queue->lock);
362 silc_mutex_lock(schedule->generic_queue->lock);
363 if (!schedule->generic_queue->task) {
/* No generic tasks registered at all. */
364 silc_mutex_unlock(schedule->generic_queue->lock);
368 task = schedule->generic_queue->task;
370 /* Validity of the task is checked always before and after
371 execution because the task might have been unregistered
372 in the callback function, ie. it is not valid anymore. */
374 /* Is the task ready for reading */
375 if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
376 silc_mutex_unlock(schedule->generic_queue->lock);
377 SILC_SCHEDULE_UNLOCK(schedule);
/* Note: generic callbacks receive the polled `fd', not task->fd. */
378 task->callback(schedule, SILC_TASK_READ, fd, task->context);
379 SILC_SCHEDULE_LOCK(schedule);
380 silc_mutex_lock(schedule->generic_queue->lock);
383 /* Is the task ready for writing */
384 if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
385 silc_mutex_unlock(schedule->generic_queue->lock);
386 SILC_SCHEDULE_UNLOCK(schedule);
387 task->callback(schedule, SILC_TASK_WRITE, fd, task->context);
388 SILC_SCHEDULE_LOCK(schedule);
389 silc_mutex_lock(schedule->generic_queue->lock);
393 /* Invalid (unregistered) tasks are removed from the
395 if (schedule->generic_queue->task == task->next) {
396 silc_schedule_task_remove(schedule->generic_queue, task);
397 silc_mutex_unlock(schedule->generic_queue->lock);
402 silc_schedule_task_remove(schedule->generic_queue, task);
406 /* Break if there isn't more tasks in the queue */
407 if (schedule->generic_queue->task == task->next)
413 silc_mutex_unlock(schedule->generic_queue->lock);
418 /* Executes all tasks whose timeout has expired. The task is removed from
419 the task queue after the callback function has returned. Also, invalid
420 tasks are removed here. We don't have to care about priorities because
421 tasks are already sorted in their priority order at the registration
423 /* This holds the schedule->lock and the schedule->timeout_queue->lock */
425 static void silc_schedule_dispatch_timeout(SilcSchedule schedule)
/* Runs every timeout task whose expiry time is at or before now, removing
   each task after its callback returns.
   NOTE(review): the `task' declaration, loop framing and several braces
   are missing from this extraction. */
427 SilcTaskQueue queue = schedule->timeout_queue;
429 struct timeval curtime;
431 SILC_LOG_DEBUG(("Running timeout tasks"));
433 silc_gettimeofday(&curtime);
/* Redundant: `queue' was already initialized to the same value above. */
435 queue = schedule->timeout_queue;
436 if (queue && queue->task) {
439 /* Walk through all tasks in the particular task queue and run all
440 the expired tasks. */
442 /* Execute the task if the timeout has expired */
443 if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
/* Drop both locks across the callback; it may touch the scheduler. */
445 silc_mutex_unlock(queue->lock);
446 SILC_SCHEDULE_UNLOCK(schedule);
447 task->callback(schedule, SILC_TASK_EXPIRE, task->fd, task->context);
448 SILC_SCHEDULE_LOCK(schedule);
449 silc_mutex_lock(queue->lock);
452 /* Break if there isn't more tasks in the queue */
453 if (queue->task == task->next) {
454 silc_schedule_task_remove(queue, task);
460 /* Remove the task from queue */
461 silc_schedule_task_remove(queue, task->prev);
463 /* The timeout hasn't expired, check for next one */
465 /* Break if there isn't more tasks in the queue */
466 if (queue->task == task->next)
475 /* Calculates next timeout for select(). This is the timeout value
476 when at earliest some of the timeout tasks expire. If this is in the
477 past, they will be run now. */
478 /* This holds the schedule->lock and the schedule->timeout_queue->lock */
480 static void silc_schedule_select_timeout(SilcSchedule schedule)
/* Computes the timeval handed to silc_select(): the delta from now to the
   earliest timeout task. Already-expired tasks are dispatched immediately
   instead of producing a negative timeout.
   NOTE(review): the `task' declaration, loop framing and several braces
   are missing from this extraction. */
482 SilcTaskQueue queue = schedule->timeout_queue;
484 struct timeval curtime;
486 /* Get the current time */
487 silc_gettimeofday(&curtime);
/* No timeout tasks => leave NULL so select() can block indefinitely. */
488 schedule->timeout = NULL;
490 /* First task in the task queue has always the smallest timeout. */
493 if (task && task->valid == TRUE) {
494 /* If the timeout is in past, we will run the task and all other
495 timeout tasks from the past. */
496 if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
497 silc_schedule_dispatch_timeout(schedule);
499 /* The task(s) has expired and doesn't exist on the task queue
500 anymore. We continue with new timeout. */
501 queue = schedule->timeout_queue;
503 if (task == NULL || task->valid == FALSE)
507 /* Calculate the next timeout for select() */
508 queue->timeout.tv_sec = task->timeout.tv_sec - curtime.tv_sec;
509 queue->timeout.tv_usec = task->timeout.tv_usec - curtime.tv_usec;
510 if (queue->timeout.tv_sec < 0)
511 queue->timeout.tv_sec = 0;
513 /* We wouldn't want to go under zero, check for it. */
514 if (queue->timeout.tv_usec < 0) {
/* Borrow one second to normalise a negative microsecond field. */
515 queue->timeout.tv_sec -= 1;
516 if (queue->timeout.tv_sec < 0)
517 queue->timeout.tv_sec = 0;
518 queue->timeout.tv_usec += 1000000L;
521 /* We've got the timeout value */
524 /* Task is not valid, remove it and try next one. */
525 silc_schedule_task_remove(queue, task);
527 if (queue->task == NULL)
532 /* Save the timeout */
534 schedule->timeout = &queue->timeout;
535 SILC_LOG_DEBUG(("timeout: sec=%d, usec=%d", schedule->timeout->tv_sec,
536 schedule->timeout->tv_usec));
540 /* Runs the scheduler once and then returns. */
542 bool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
/* One scheduler iteration: compute the select timeout, block in
   silc_select(), then dispatch expired timeout tasks and signalled fd
   tasks. Returns whether the scheduler should keep running (used by the
   loop in silc_schedule()).
   NOTE(review): the `ret' declaration, several braces, the handling of
   silc_select()'s return value and the return statements are missing from
   this extraction. */
544 struct timeval timeout;
547 SILC_LOG_DEBUG(("In scheduler loop"));
/* When called from silc_schedule() the scheduler lock is already held
   (is_locked); take it only for stand-alone calls. */
549 if (!schedule->is_locked)
550 SILC_SCHEDULE_LOCK(schedule);
552 /* If the task queues aren't initialized or we aren't valid anymore
554 if ((!schedule->fd_queue && !schedule->timeout_queue
555 && !schedule->generic_queue) || schedule->valid == FALSE) {
556 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
557 if (!schedule->is_locked)
558 SILC_SCHEDULE_UNLOCK(schedule);
562 /* Calculate next timeout for silc_select(). This is the timeout value
563 when at earliest some of the timeout tasks expire. */
564 silc_mutex_lock(schedule->timeout_queue->lock);
565 silc_schedule_select_timeout(schedule);
566 silc_mutex_unlock(schedule->timeout_queue->lock);
/* An explicit caller-supplied timeout overrides the computed one.
   NOTE(review): the tv_sec assignment is missing from this extraction;
   as visible, only tv_usec is set -- confirm against the original. */
568 if (timeout_usecs >= 0) {
570 timeout.tv_usec = timeout_usecs;
571 schedule->timeout = &timeout;
/* Release the lock across the blocking select call. */
574 SILC_SCHEDULE_UNLOCK(schedule);
576 /* This is the main select(). The program blocks here until some
577 of the selected file descriptors change status or the selected
579 SILC_LOG_DEBUG(("Select"));
580 ret = silc_select(schedule->fd_list, schedule->last_fd + 1,
583 SILC_SCHEDULE_LOCK(schedule);
590 SILC_LOG_ERROR(("Error in select(): %s", strerror(errno)));
/* Presumably the select-timed-out branch (ret == 0): run the expired
   timeout tasks -- confirm, the switch framing is missing here. */
594 silc_mutex_lock(schedule->timeout_queue->lock);
595 silc_schedule_dispatch_timeout(schedule);
596 silc_mutex_unlock(schedule->timeout_queue->lock);
599 /* There is some data available now */
600 SILC_LOG_DEBUG(("Running non-timeout tasks"));
601 silc_schedule_dispatch_nontimeout(schedule);
605 if (!schedule->is_locked)
606 SILC_SCHEDULE_UNLOCK(schedule);
611 /* The SILC scheduler. This is actually the main routine in SILC programs.
612 When this returns the program is to be ended. Before this function can
613 be called, one must call silc_schedule_init function. */
615 void silc_schedule(SilcSchedule schedule)
617 SILC_LOG_DEBUG(("Running scheduler"));
619 if (schedule->valid == FALSE) {
620 SILC_LOG_ERROR(("Scheduler is not valid, stopping"));
624 SILC_SCHEDULE_LOCK(schedule);
625 schedule->is_locked = TRUE;
627 /* Start the scheduler loop */
628 while (silc_schedule_one(schedule, -1))
631 SILC_SCHEDULE_UNLOCK(schedule);
634 /* Wakes up the scheduler. This is used only in multi-threaded
635 environments where threads may add new tasks or remove old tasks
636 from task queues. This is called to wake up the scheduler in the
637 main thread so that it detects the changes in the task queues.
638 If threads support is not compiled in this function has no effect.
639 Implementation of this function is platform specific. */
641 void silc_schedule_wakeup(SilcSchedule schedule)
644 SILC_LOG_DEBUG(("Wakeup scheduler"));
645 SILC_SCHEDULE_LOCK(schedule);
646 silc_schedule_internal_wakeup(schedule->internal);
647 SILC_SCHEDULE_UNLOCK(schedule);
651 /* Add new task to the scheduler */
653 SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
654 SilcTaskCallback callback, void *context,
655 long seconds, long useconds,
657 SilcTaskPriority priority)
/* Registers a new task and returns it.
   NOTE(review): a parameter line (presumably the task `type') is missing
   between useconds and priority, as are braces, the declarations of
   `queue'/`newtask'/`task', the `newtask->fd = fd' style assignment, the
   loop framing and the return statements. */
663 SILC_LOG_DEBUG(("Registering new task, fd=%d type=%d priority=%d", fd,
666 queue = SILC_SCHEDULE_GET_QUEUE(type);
668 /* If the task is generic task, we check whether this task has already
669 been registered. Generic tasks are registered only once and after that
670 the same task applies to all file descriptors to be registered. */
671 if (type == SILC_TASK_GENERIC) {
672 silc_mutex_lock(queue->lock);
675 SilcTask task = queue->task;
677 if ((task->callback == callback) && (task->context == context)) {
678 SILC_LOG_DEBUG(("Found matching generic task, using the match"));
680 silc_mutex_unlock(queue->lock);
682 /* Add the fd to be listened, the task found now applies to this
684 silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ);
/* Circular list: one full round ends at the head again. */
688 if (queue->task == task->next)
695 silc_mutex_unlock(queue->lock);
698 newtask = silc_calloc(1, sizeof(*newtask));
700 newtask->context = context;
701 newtask->callback = callback;
702 newtask->valid = TRUE;
703 newtask->priority = priority;
704 newtask->type = type;
/* A fresh task is its own one-element circular list. */
705 newtask->next = newtask;
706 newtask->prev = newtask;
708 /* Create timeout if marked to be timeout task */
709 if (((seconds + useconds) > 0) && (type == SILC_TASK_TIMEOUT)) {
/* Timeout is stored as an absolute time: now + requested delta, with
   the microsecond field normalised into [0, 999999]. */
710 silc_gettimeofday(&newtask->timeout);
711 newtask->timeout.tv_sec += seconds + (useconds / 1000000L);
712 newtask->timeout.tv_usec += (useconds % 1000000L);
713 if (newtask->timeout.tv_usec > 999999L) {
714 newtask->timeout.tv_sec += 1;
715 newtask->timeout.tv_usec -= 1000000L;
720 /* If the task is non-timeout task we have to tell the scheduler that we
721 would like to have these tasks scheduled at some odd distant future. */
722 if (type != SILC_TASK_TIMEOUT)
723 silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ);
725 silc_mutex_lock(queue->lock);
727 /* Is this first task of the queue? */
728 if (queue->task == NULL) {
729 queue->task = newtask;
730 silc_mutex_unlock(queue->lock);
/* Otherwise insert at the sorted position: by timeout for timeout tasks,
   by priority for the others. */
735 newtask = silc_task_add_timeout(queue, newtask, priority);
737 newtask = silc_task_add(queue, newtask, priority);
739 silc_mutex_unlock(queue->lock);
744 /* Removes a task from the scheduler */
746 void silc_schedule_task_del(SilcSchedule schedule, SilcTask task)
/* Unregisters the given task, or every task in its queue when called with
   SILC_ALL_TASKS. Matching the silc_task_del_by_* helpers below, this
   apparently invalidates tasks (valid = FALSE) rather than unlinking them
   immediately -- confirm, the traversal lines are missing from this
   extraction.
   NOTE(review): `task->type' is dereferenced on the next line BEFORE the
   SILC_ALL_TASKS check; if SILC_ALL_TASKS is a sentinel pointer this
   dereference looks unsafe -- confirm its definition. */
748 SilcTaskQueue queue = SILC_SCHEDULE_GET_QUEUE(task->type);
750 /* Unregister all tasks */
751 if (task == SILC_ALL_TASKS) {
753 SILC_LOG_DEBUG(("Unregistering all tasks at once"));
755 silc_mutex_lock(queue->lock);
/* Empty queue: nothing to invalidate. */
758 silc_mutex_unlock(queue->lock);
/* Circular list: one full round ends at the head again. */
767 if (queue->task == next->next)
772 silc_mutex_unlock(queue->lock);
776 SILC_LOG_DEBUG(("Unregistering task"));
778 silc_mutex_lock(queue->lock);
780 /* Unregister the specific task */
784 silc_mutex_unlock(queue->lock);
787 /* Remove task by fd */
789 void silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
791 SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
793 silc_task_del_by_fd(schedule->timeout_queue, fd);
794 silc_task_del_by_fd(schedule->fd_queue, fd);
797 /* Remove task by task callback. */
799 void silc_schedule_task_del_by_callback(SilcSchedule schedule,
800 SilcTaskCallback callback)
802 SILC_LOG_DEBUG(("Unregister task by callback"));
804 silc_task_del_by_callback(schedule->timeout_queue, callback);
805 silc_task_del_by_callback(schedule->fd_queue, callback);
806 silc_task_del_by_callback(schedule->generic_queue, callback);
809 /* Remove task by context. */
811 void silc_schedule_task_del_by_context(SilcSchedule schedule, void *context)
813 SILC_LOG_DEBUG(("Unregister task by context"));
815 silc_task_del_by_context(schedule->timeout_queue, context);
816 silc_task_del_by_context(schedule->fd_queue, context);
817 silc_task_del_by_context(schedule->generic_queue, context);
820 /* Sets a file descriptor to be listened by select() in scheduler. One can
821 call this directly if wanted. This can be called multiple times for
822 one file descriptor to set different iomasks. */
824 void silc_schedule_set_listen_fd(SilcSchedule schedule,
825 SilcUInt32 fd, SilcTaskEvent iomask)
/* Adds `fd' to the select() watch list with event mask `iomask', or
   refreshes the mask if the fd is already present.
   NOTE(review): the declaration of `i', braces and the statements that
   terminate each loop after a hit (break/goto) are missing from this
   extraction. */
830 SILC_SCHEDULE_LOCK(schedule);
/* First pass: fd already present -- just refresh its event mask and the
   last_fd watermark. */
832 for (i = 0; i < schedule->max_fd; i++)
833 if (schedule->fd_list[i].fd == fd) {
834 schedule->fd_list[i].fd = fd;
835 schedule->fd_list[i].events = iomask;
836 if (i > schedule->last_fd)
837 schedule->last_fd = i;
/* Second pass: claim the first free slot (events == 0). */
843 for (i = 0; i < schedule->max_fd; i++)
844 if (schedule->fd_list[i].events == 0) {
845 schedule->fd_list[i].fd = fd;
846 schedule->fd_list[i].events = iomask;
847 if (i > schedule->last_fd)
848 schedule->last_fd = i;
852 SILC_SCHEDULE_UNLOCK(schedule);
855 /* Removes a file descriptor from listen list. */
857 void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
/* Removes `fd' from the select() watch list.
   NOTE(review): the declaration of `i', braces and the loop-exit
   statement are missing from this extraction. */
861 SILC_SCHEDULE_LOCK(schedule);
863 SILC_LOG_DEBUG(("Unset listen fd %d", fd));
865 for (i = 0; i < schedule->max_fd; i++)
866 if (schedule->fd_list[i].fd == fd) {
867 schedule->fd_list[i].fd = 0;
868 schedule->fd_list[i].events = 0;
/* Conservative watermark reset: if the removed entry held the
   watermark, fall back to the full list size. */
869 if (schedule->last_fd == i)
870 schedule->last_fd = schedule->max_fd - 1;
874 SILC_SCHEDULE_UNLOCK(schedule);
877 /* Register a new signal */
879 void silc_schedule_signal_register(SilcSchedule schedule, SilcUInt32 signal)
881 silc_schedule_internal_signal_register(schedule->internal, signal);
884 /* Unregister a new signal */
886 void silc_schedule_signal_unregister(SilcSchedule schedule, SilcUInt32 signal)
888 silc_schedule_internal_signal_unregister(schedule->internal, signal);
891 /* Allocates a newtask task queue into the scheduler */
893 static void silc_task_queue_alloc(SilcTaskQueue *queue)
895 *queue = silc_calloc(1, sizeof(**queue));
896 silc_mutex_alloc(&(*queue)->lock);
899 /* Free's a task queue. */
901 static void silc_task_queue_free(SilcTaskQueue queue)
903 silc_mutex_free(queue->lock);
907 /* Return task by its fd. */
909 static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd)
/* NOTE(review): the body of this function is almost entirely missing from
   this extraction -- only the circular-list end test below survives.
   Expected behaviour per the comment above: walk queue->task's circular
   list and return the task whose fd matches, otherwise NULL. Confirm
   against the original source. */
921 if (queue->task == next->next)
929 /* Adds a non-timeout task into the task queue. This function is used
930 by silc_task_register function. Returns a pointer to the registered
933 static SilcTask silc_task_add(SilcTaskQueue queue, SilcTask newtask,
934 SilcTaskPriority priority)
/* Inserts `newtask' into a non-timeout queue according to `priority'.
   NOTE(review): break statements, any SILC_TASK_PRI_HIGH/default cases,
   the final return and several braces are missing from this extraction. */
936 SilcTask task, next, prev;
938 /* Take the first task in the queue */
942 case SILC_TASK_PRI_LOW:
943 /* Lowest priority. The task is added at the end of the list. */
945 newtask->prev = prev;
946 newtask->next = task;
947 prev->next = newtask;
948 task->prev = newtask;
950 case SILC_TASK_PRI_NORMAL:
951 /* Normal priority. The task is added before lower priority tasks
952 but after tasks with higher priority. */
954 while(prev != task) {
955 if (prev->priority > SILC_TASK_PRI_LOW)
960 /* There are only lower priorities in the list, we will
961 sit before them and become the first task in the queue. */
963 newtask->prev = prev;
964 newtask->next = task;
965 task->prev = newtask;
966 prev->next = newtask;
968 /* We are now the first task in queue */
969 queue->task = newtask;
971 /* Found a spot from the list, add the task to the list. */
973 newtask->prev = prev;
974 newtask->next = next;
975 prev->next = newtask;
976 next->prev = newtask;
987 /* Return the timeout task with smallest timeout. */
989 static SilcTask silc_task_get_first(SilcTaskQueue queue, SilcTask first)
/* NOTE(review): the body of this function is almost entirely missing from
   this extraction. Per the comment above and the surviving timeout
   comparison below, it scans the circular list for the task with the
   smallest timeout, starting from `first' -- confirm against the
   original source. */
1003 if (silc_schedule_task_timeout_compare(&prev->timeout, &task->timeout))
1012 /* Adds a timeout task into the task queue. This function is used by
1013 silc_task_register function. Returns a pointer to the registered
1014 task. Timeout tasks are sorted by their timeout value in ascending
1015 order. The priority matters if there are more than one task with
1018 static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
1019 SilcTaskPriority priority)
/* Inserts `newtask' into the timeout queue keeping ascending timeout
   order; `priority' breaks ties between equal timeouts.
   NOTE(review): break statements, loop-advance lines, any
   SILC_TASK_PRI_HIGH/default cases, the final return and several braces
   are missing from this extraction. */
1021 SilcTask task, prev, next;
1023 /* Take the first task in the queue */
1026 /* Take last task from the list */
1030 case SILC_TASK_PRI_LOW:
1031 /* Lowest priority. The task is added at the end of the list. */
1032 while(prev != task) {
1034 /* If we have longer timeout than with the task head of us
1035 we have found our spot. */
1036 if (silc_schedule_task_timeout_compare(&prev->timeout,
1040 /* If we are equal size of timeout we will be after it. */
1041 if (!silc_schedule_task_timeout_compare(&newtask->timeout,
1045 /* We have shorter timeout, compare to next one. */
1048 /* Found a spot from the list, add the task to the list. */
1050 newtask->prev = prev;
1051 newtask->next = next;
1052 prev->next = newtask;
1053 next->prev = newtask;
1056 /* Check if we are going to be the first task in the queue */
1057 if (silc_schedule_task_timeout_compare(&prev->timeout,
1060 if (!silc_schedule_task_timeout_compare(&newtask->timeout,
1064 /* We are now the first task in queue */
1065 queue->task = newtask;
1068 case SILC_TASK_PRI_NORMAL:
1069 /* Normal priority. The task is added before lower priority tasks
1070 but after tasks with higher priority. */
1071 while(prev != task) {
1073 /* If we have longer timeout than with the task head of us
1074 we have found our spot. */
1075 if (silc_schedule_task_timeout_compare(&prev->timeout,
1079 /* If we are equal size of timeout, priority kicks in place. */
1080 if (!silc_schedule_task_timeout_compare(&newtask->timeout,
1082 if (prev->priority >= SILC_TASK_PRI_NORMAL)
1085 /* We have shorter timeout or higher priority, compare to next one. */
1088 /* Found a spot from the list, add the task to the list. */
1090 newtask->prev = prev;
1091 newtask->next = next;
1092 prev->next = newtask;
1093 next->prev = newtask;
1096 /* Check if we are going to be the first task in the queue */
1097 if (silc_schedule_task_timeout_compare(&prev->timeout,
1100 if (!silc_schedule_task_timeout_compare(&newtask->timeout,
1102 if (prev->priority >= SILC_TASK_PRI_NORMAL)
1105 /* We are now the first task in queue */
1106 queue->task = newtask;
1117 /* Removes (unregisters) a task from particular task queue. This function
1118 is used internally by scheduler. This must be called holding the
1121 static int silc_schedule_task_remove(SilcTaskQueue queue, SilcTask task)
/* Unlinks `task' (or every task, with SILC_ALL_TASKS) from `queue' and
   frees it. Per the comment above, the caller must hold the queue lock.
   NOTE(review): the return statements, loop framing and some unlink
   pointer updates are missing from this extraction. */
1123 SilcTask first, old, next;
1125 if (!queue || !task)
1132 first = queue->task;
1134 /* Unregister all tasks in queue */
1135 if (task == SILC_ALL_TASKS) {
1136 SILC_LOG_DEBUG(("Removing all tasks at once"));
/* Walk the circular list freeing the node behind the cursor. */
1141 silc_free(next->prev);
1150 SILC_LOG_DEBUG(("Removing task"));
1152 /* Unregister the task */
1156 SilcTask prev, next;
/* Single-element list: removing it empties the queue. */
1163 if (prev == old && next == old)
/* Removing the head: pick a new head via silc_task_get_first. */
1165 if (queue->task == old)
1166 queue->task = silc_task_get_first(queue, next);
/* Compare two time values. Returns TRUE (1) if the first argument is
   strictly smaller than the second, FALSE (0) otherwise. */

static int silc_schedule_task_timeout_compare(struct timeval *smaller,
                                              struct timeval *bigger)
{
  /* Seconds decide first; only on a tie do the microseconds matter. */
  if ((smaller->tv_sec < bigger->tv_sec) ||
      ((smaller->tv_sec == bigger->tv_sec) &&
       (smaller->tv_usec < bigger->tv_usec)))
    return 1;

  return 0;
}
1193 static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd)
/* Invalidates (valid = FALSE) every task in `queue' whose fd matches; the
   scheduler removes invalidated tasks later during dispatch.
   NOTE(review): the fd comparison line, the cursor declaration and the
   loop framing are missing from this extraction. */
1197 silc_mutex_lock(queue->lock);
/* Presumably the empty-queue early exit -- confirm, its condition line
   is missing here. */
1200 silc_mutex_unlock(queue->lock);
1208 next->valid = FALSE;
/* Circular list: one full round ends at the head again. */
1209 if (queue->task == next->next)
1214 silc_mutex_unlock(queue->lock);
1217 static void silc_task_del_by_callback(SilcTaskQueue queue,
1218 SilcTaskCallback callback)
/* Invalidates every task in `queue' registered with `callback'; the
   scheduler removes invalidated tasks later during dispatch.
   NOTE(review): the cursor declaration and loop framing are missing from
   this extraction. */
1222 silc_mutex_lock(queue->lock);
/* Presumably the empty-queue early exit -- confirm, its condition line
   is missing here. */
1225 silc_mutex_unlock(queue->lock);
1232 if (next->callback == callback)
1233 next->valid = FALSE;
/* Circular list: one full round ends at the head again. */
1234 if (queue->task == next->next)
1239 silc_mutex_unlock(queue->lock);
1242 static void silc_task_del_by_context(SilcTaskQueue queue, void *context)
/* Invalidates every task in `queue' registered with `context'; the
   scheduler removes invalidated tasks later during dispatch.
   NOTE(review): the cursor declaration and loop framing are missing from
   this extraction. */
1246 silc_mutex_lock(queue->lock);
/* Presumably the empty-queue early exit -- confirm, its condition line
   is missing here. */
1249 silc_mutex_unlock(queue->lock);
1256 if (next->context == context)
1257 next->valid = FALSE;
/* Circular list: one full round ends at the head again. */
1258 if (queue->task == next->next)
1263 silc_mutex_unlock(queue->lock);