5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 1998 - 2007 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
23 /************************** Types and definitions ***************************/
25 /* Platform specific implementation */
26 extern const SilcScheduleOps schedule_ops;
28 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task);
29 static void silc_schedule_dispatch_fd(SilcSchedule schedule);
30 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
31 SilcBool dispatch_all);
34 /************************ Static utility functions **************************/
36 /* Fd task hash table destructor */
38 static void silc_schedule_fd_destructor(void *key, void *context,
44 /* Executes file descriptor tasks. Invalid tasks are removed here. */
/* NOTE(review): lossy extraction -- interior lines (locals `task`/`t`,
   opening/closing braces) are missing between the numbered lines below.
   Code kept byte-identical; comments only added. */
46 static void silc_schedule_dispatch_fd(SilcSchedule schedule)
51 /* The dispatch list includes only valid tasks, and tasks that have
52 something to dispatch. Dispatching is atomic; no matter if another
53 thread invalidates a task when we unlock, we dispatch to completion. */
/* Unlock before running callbacks so they may re-enter scheduler API. */
54 SILC_SCHEDULE_UNLOCK(schedule);
55 silc_list_start(schedule->fd_dispatch);
56 while ((task = silc_list_get(schedule->fd_dispatch))) {
59 /* Is the task ready for reading */
60 if (task->revents & SILC_TASK_READ)
61 t->callback(schedule, schedule->app_context, SILC_TASK_READ,
62 task->fd, t->context);
64 /* Is the task ready for writing */
/* t->valid re-checked: the READ callback above may have invalidated
   the task, in which case the WRITE event must not be delivered. */
65 if (t->valid && task->revents & SILC_TASK_WRITE)
66 t->callback(schedule, schedule->app_context, SILC_TASK_WRITE,
67 task->fd, t->context);
69 SILC_SCHEDULE_LOCK(schedule);
71 /* Remove invalidated tasks */
/* Second pass, done under the lock: purge tasks invalidated either
   before dispatch or by the callbacks themselves. */
72 silc_list_start(schedule->fd_dispatch);
73 while ((task = silc_list_get(schedule->fd_dispatch)))
74 if (silc_unlikely(!task->header.valid))
75 silc_schedule_task_remove(schedule, (SilcTask)task);
78 /* Executes all tasks whose timeout has expired. The task is removed from
79 the task queue after the callback function has returned. Also, invalid
80 tasks are removed here. */
/* NOTE(review): interior lines are missing from this extraction (locals
   `task`/`t`/`count`, loop braces, `continue`/`break` statements).
   Code kept byte-identical; comments only added. */
82 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
83 SilcBool dispatch_all)
87 struct timeval curtime;
90 SILC_LOG_DEBUG(("Running timeout tasks"));
92 silc_gettimeofday(&curtime);
94 /* First task in the task queue has always the earliest timeout. */
95 silc_list_start(schedule->timeout_queue);
96 task = silc_list_get(schedule->timeout_queue);
97 if (silc_unlikely(!task))
102 /* Remove invalid task */
103 if (silc_unlikely(!t->valid)) {
104 silc_schedule_task_remove(schedule, t);
108 /* Execute the task if the timeout has expired */
/* Queue is sorted by expiry, so a not-yet-expired head means nothing
   later can be expired either -- unless dispatch_all forces execution
   of every queued timeout (used at uninit). */
109 if (silc_compare_timeval(&task->timeout, &curtime) > 0 && !dispatch_all)
/* Callback runs unlocked; SILC_TASK_EXPIRE identifies timeout delivery. */
113 SILC_SCHEDULE_UNLOCK(schedule);
114 t->callback(schedule, schedule->app_context, SILC_TASK_EXPIRE, 0,
116 SILC_SCHEDULE_LOCK(schedule);
118 /* Remove the expired task */
119 silc_schedule_task_remove(schedule, t);
121 /* Balance when we have lots of small timeouts */
/* Cap (>40) bounds the work done per call so fd tasks are not starved
   by a flood of short timeouts. */
122 if (silc_unlikely((++count) > 40))
124 } while (silc_likely((task = silc_list_get(schedule->timeout_queue))));
127 /* Calculates next timeout. This is the timeout value when at earliest some
128 of the timeout tasks expire. If this is in the past, they will be
/* NOTE(review): interior lines missing (local `t`, `return`s, braces,
   the `do {` opener and the `dispatch = FALSE` path implied by line 158).
   Code kept byte-identical; comments only added. */
131 static void silc_schedule_select_timeout(SilcSchedule schedule)
134 SilcTaskTimeout task;
135 struct timeval curtime;
136 SilcBool dispatch = TRUE;
138 /* Get the current time */
139 silc_gettimeofday(&curtime);
140 schedule->has_timeout = FALSE;
142 /* First task in the task queue has always the earliest timeout. */
143 silc_list_start(schedule->timeout_queue);
144 task = silc_list_get(schedule->timeout_queue);
145 if (silc_unlikely(!task))
150 /* Remove invalid task */
151 if (silc_unlikely(!t->valid)) {
152 silc_schedule_task_remove(schedule, t);
156 /* If the timeout is in past, we will run the task and all other
157 timeout tasks from the past. */
158 if (silc_compare_timeval(&task->timeout, &curtime) <= 0 && dispatch) {
159 silc_schedule_dispatch_timeout(schedule, FALSE);
/* Dispatch may stop the scheduler; bail out if no longer valid. */
160 if (silc_unlikely(!schedule->valid))
163 /* Start selecting new timeout again after dispatch */
164 silc_list_start(schedule->timeout_queue);
169 /* Calculate the next timeout */
/* Relative delta = task expiry - now, computed field by field; the
   usec underflow is normalized below by borrowing from tv_sec. */
170 curtime.tv_sec = task->timeout.tv_sec - curtime.tv_sec;
171 curtime.tv_usec = task->timeout.tv_usec - curtime.tv_usec;
172 if (curtime.tv_sec < 0)
175 /* We wouldn't want to go under zero, check for it. */
176 if (curtime.tv_usec < 0) {
178 if (curtime.tv_sec < 0)
180 curtime.tv_usec += 1000000L;
183 } while ((task = silc_list_get(schedule->timeout_queue)));
185 /* Save the timeout */
/* Published via schedule->timeout/has_timeout for the platform select. */
187 schedule->timeout = curtime;
188 schedule->has_timeout = TRUE;
189 SILC_LOG_DEBUG(("timeout: sec=%ld, usec=%ld", schedule->timeout.tv_sec,
190 schedule->timeout.tv_usec));
194 /* Removes task from the scheduler. This must be called with scheduler
/* NOTE(review): interior lines missing (locals `fd`/`ftask`, the
   `return`s, the `} else {` joining the type branches, closing braces).
   Code kept byte-identical; comments only added. */
197 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task)
/* SILC_ALL_TASKS sentinel: drain both queues completely. */
201 if (silc_unlikely(task == SILC_ALL_TASKS)) {
203 SilcHashTableList htl;
206 /* Delete from fd queue */
207 silc_hash_table_list(schedule->fd_queue, &htl);
208 while (silc_hash_table_get(&htl, &fd, (void *)&task))
209 silc_hash_table_del(schedule->fd_queue, fd);
210 silc_hash_table_list_reset(&htl);
212 /* Delete from timeout queue */
213 silc_list_start(schedule->timeout_queue);
214 while ((task = silc_list_get(schedule->timeout_queue))) {
215 silc_list_del(schedule->timeout_queue, task);
/* Single task: type == 1 marks a timeout task (cf. task_add), which is
   recycled via the freelist instead of freed immediately. */
222 if (silc_likely(task->type == 1)) {
223 /* Delete from timeout queue */
224 silc_list_del(schedule->timeout_queue, task);
226 /* Put to free list */
227 silc_list_add(schedule->free_tasks, task);
/* Otherwise an fd task: hash-table delete triggers the fd destructor. */
229 /* Delete from fd queue */
230 ftask = (SilcTaskFd)task;
231 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(ftask->fd));
235 /* Timeout freelist garbage collection */
/* Periodic task that trims the recycled-timeout-task freelist so unused
   task structures do not accumulate forever.  NOTE(review): interior
   lines missing (locals `t`/`c`, `silc_free` of each task, the timeout
   arguments of the re-arm calls, braces); code kept byte-identical. */
237 SILC_TASK_CALLBACK(silc_schedule_timeout_gc)
242 if (!schedule->valid)
245 SILC_LOG_DEBUG(("Timeout freelist garbage collection"));
247 SILC_SCHEDULE_LOCK(schedule);
/* Keep a small reserve: with <= 10 free tasks there is nothing worth
   collecting; just re-arm ourselves and return. */
249 if (silc_list_count(schedule->free_tasks) <= 10) {
250 SILC_SCHEDULE_UNLOCK(schedule);
251 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* More live timeouts than free tasks means the freelist will likely be
   reused soon -- skip collection this round. */
255 if (silc_list_count(schedule->timeout_queue) >
256 silc_list_count(schedule->free_tasks)) {
257 SILC_SCHEDULE_UNLOCK(schedule);
258 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* Free roughly half the freelist, but never drop below the live-queue
   demand nor under the reserve of 10 entries. */
263 c = silc_list_count(schedule->free_tasks) / 2;
264 if (c > silc_list_count(schedule->timeout_queue))
265 c = (silc_list_count(schedule->free_tasks) -
266 silc_list_count(schedule->timeout_queue));
267 if (silc_list_count(schedule->free_tasks) - c < 10)
268 c -= (10 - (silc_list_count(schedule->free_tasks) - c));
270 SILC_LOG_DEBUG(("Freeing %d unused tasks, leaving %d", c,
271 silc_list_count(schedule->free_tasks) - c));
273 silc_list_start(schedule->free_tasks);
274 while ((t = silc_list_get(schedule->free_tasks)) && c-- > 0) {
275 silc_list_del(schedule->free_tasks, t);
278 silc_list_start(schedule->free_tasks);
280 SILC_SCHEDULE_UNLOCK(schedule);
/* Re-arm the GC task for the next collection cycle. */
282 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
286 #ifdef SILC_DIST_INPLACE
287 /* Print schedule statistics to stdout */
/* Debug-build-only helper (compiled only under SILC_DIST_INPLACE):
   dumps counts and approximate memory footprint of the fd queue, the
   timeout queue and the timeout freelist. */
289 void silc_schedule_stats(SilcSchedule schedule)
292 fprintf(stdout, "Schedule %p statistics:\n\n", schedule);
293 fprintf(stdout, "Num FD tasks : %u (%u bytes allocated)\n",
294 silc_hash_table_count(schedule->fd_queue),
295 (unsigned int)sizeof(*ftask) *
296 silc_hash_table_count(schedule->fd_queue));
297 fprintf(stdout, "Num Timeout tasks : %d (%d bytes allocated)\n",
298 silc_list_count(schedule->timeout_queue),
299 (unsigned int)sizeof(struct SilcTaskTimeoutStruct) *
300 silc_list_count(schedule->timeout_queue));
301 fprintf(stdout, "Num Timeout freelist : %d (%d bytes allocated)\n",
302 silc_list_count(schedule->free_tasks),
303 (unsigned int)sizeof(struct SilcTaskTimeoutStruct) *
304 silc_list_count(schedule->free_tasks));
306 #endif /* SILC_DIST_INPLACE */
308 /****************************** Public API **********************************/
310 /* Initializes the scheduler. This returns the scheduler context that
311 is given as argument usually to all silc_schedule_* functions.
312 The `max_tasks' indicates the number of maximum tasks that the
313 scheduler can handle. The `app_context' is application specific
314 context that is delivered to task callbacks. */
/* NOTE(review): interior lines missing (NULL checks after silc_calloc,
   the `schedule->fd_queue =` assignment target of the alloc on line 327,
   error-path returns, final return, the GC timeout arguments).
   Code kept byte-identical; comments only added. */
316 SilcSchedule silc_schedule_init(int max_tasks, void *app_context)
318 SilcSchedule schedule;
320 SILC_LOG_DEBUG(("Initializing scheduler"));
322 schedule = silc_calloc(1, sizeof(*schedule));
/* Fd queue: keyed by fd (silc_hash_uint); entry destructor frees tasks. */
327 silc_hash_table_alloc(0, silc_hash_uint, NULL, NULL, NULL,
328 silc_schedule_fd_destructor, NULL, TRUE);
329 if (!schedule->fd_queue) {
334 silc_list_init(schedule->timeout_queue, struct SilcTaskStruct, next);
335 silc_list_init(schedule->free_tasks, struct SilcTaskStruct, next);
337 schedule->app_context = app_context;
338 schedule->valid = TRUE;
/* max_tasks limits fd tasks only; 0 presumably means unlimited --
   cf. the max_tasks > 0 guard in silc_schedule_task_add. */
339 schedule->max_tasks = max_tasks;
341 /* Allocate scheduler lock */
342 silc_mutex_alloc(&schedule->lock);
344 /* Initialize the platform specific scheduler. */
345 schedule->internal = schedule_ops.init(schedule, app_context);
346 if (!schedule->internal) {
347 silc_hash_table_free(schedule->fd_queue);
348 silc_mutex_free(schedule->lock);
353 /* Timeout freelist garbage collection */
354 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
360 /* Uninitializes the schedule. This is called when the program is ready
361 to end. This removes all tasks and task queues. Returns FALSE if the
362 scheduler could not be uninitialized. This happens when the scheduler
363 is still valid and silc_schedule_stop has not been called. */
/* NOTE(review): interior lines missing (local `task`, the `return FALSE`
   under the validity check, the silc_free of freelist entries, the
   silc_free(schedule) and final return).  Code kept byte-identical. */
365 SilcBool silc_schedule_uninit(SilcSchedule schedule)
369 SILC_LOG_DEBUG(("Uninitializing scheduler"));
/* Refuse to tear down a running scheduler; caller must stop it first. */
371 if (schedule->valid == TRUE)
374 /* Dispatch all timeouts before going away */
375 SILC_SCHEDULE_LOCK(schedule);
376 silc_schedule_dispatch_timeout(schedule, TRUE);
377 SILC_SCHEDULE_UNLOCK(schedule);
379 /* Deliver signals before going away */
380 if (schedule->signal_tasks) {
381 schedule_ops.signals_call(schedule, schedule->internal);
382 schedule->signal_tasks = FALSE;
385 /* Unregister all tasks */
/* del invalidates, remove then physically deletes -- both are needed. */
386 silc_schedule_task_del(schedule, SILC_ALL_TASKS);
387 silc_schedule_task_remove(schedule, SILC_ALL_TASKS);
389 /* Delete timeout task freelist */
390 silc_list_start(schedule->free_tasks);
391 while ((task = silc_list_get(schedule->free_tasks)))
394 /* Unregister all task queues */
395 silc_hash_table_free(schedule->fd_queue);
397 /* Uninit the platform specific scheduler. */
398 schedule_ops.uninit(schedule, schedule->internal);
400 silc_mutex_free(schedule->lock);
406 /* Stops the schedule even if it is not supposed to be stopped yet.
407 After calling this, one should call silc_schedule_uninit (after the
408 silc_schedule has returned). */
/* Clearing `valid` under the lock makes the iterate loop exit at its
   next validity check. */
410 void silc_schedule_stop(SilcSchedule schedule)
412 SILC_LOG_DEBUG(("Stopping scheduler"));
413 SILC_SCHEDULE_LOCK(schedule);
414 schedule->valid = FALSE;
415 SILC_SCHEDULE_UNLOCK(schedule);
418 /* Runs the scheduler once and then returns. Must be called locked. */
/* NOTE(review): interior lines missing (the `do {` opener, local `ret`,
   returns in the invalid-scheduler branches, `timeout.tv_sec`
   assignment, the ret < 0 error branch details, final return).
   Code kept byte-identical; comments only added. */
420 static SilcBool silc_schedule_iterate(SilcSchedule schedule, int timeout_usecs)
422 struct timeval timeout;
426 SILC_LOG_DEBUG(("In scheduler loop"));
428 /* Deliver signals if any has been set to be called */
429 if (silc_unlikely(schedule->signal_tasks)) {
430 SILC_SCHEDULE_UNLOCK(schedule);
431 schedule_ops.signals_call(schedule, schedule->internal);
432 schedule->signal_tasks = FALSE;
433 SILC_SCHEDULE_LOCK(schedule);
436 /* Check if scheduler is valid */
437 if (silc_unlikely(schedule->valid == FALSE)) {
438 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
442 /* Calculate next timeout for silc_select(). This is the timeout value
443 when at earliest some of the timeout tasks expire. This may dispatch
444 already expired timeouts. */
445 silc_schedule_select_timeout(schedule);
/* Re-check: dispatching expired timeouts may have stopped the scheduler. */
447 /* Check if scheduler is valid */
448 if (silc_unlikely(schedule->valid == FALSE)) {
449 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
/* Caller-supplied timeout (timeout_usecs >= 0) overrides the computed one;
   -1 means block and loop forever (see the while condition below). */
453 if (timeout_usecs >= 0) {
455 timeout.tv_usec = timeout_usecs;
456 schedule->timeout = timeout;
457 schedule->has_timeout = TRUE;
460 /* This is the main silc_select(). The program blocks here until some
461 of the selected file descriptors change status or the selected
463 SILC_LOG_DEBUG(("Select"));
464 ret = schedule_ops.schedule(schedule, schedule->internal);
/* ret == 0: select timed out -> run expired timeout tasks. */
466 if (silc_likely(ret == 0)) {
468 SILC_LOG_DEBUG(("Running timeout tasks"));
469 if (silc_likely(silc_list_count(schedule->timeout_queue)))
470 silc_schedule_dispatch_timeout(schedule, FALSE);
/* ret > 0: fd activity -> dispatch fd tasks. */
473 } else if (silc_likely(ret > 0)) {
474 /* There is some data available now */
475 SILC_LOG_DEBUG(("Running fd tasks"));
476 silc_schedule_dispatch_fd(schedule);
478 /* If timeout was very short, dispatch also timeout tasks */
/* < 50ms: treat near-due timeouts as due to avoid an extra select round. */
479 if (schedule->has_timeout && schedule->timeout.tv_sec == 0 &&
480 schedule->timeout.tv_usec < 50000)
481 silc_schedule_dispatch_timeout(schedule, FALSE);
485 /* Error or special case handling */
491 SILC_LOG_ERROR(("Error in select()/poll(): %s", strerror(errno)));
494 } while (timeout_usecs == -1);
499 /* Runs the scheduler once and then returns. */
/* Public single-iteration entry point: takes the scheduler lock around
   one pass of silc_schedule_iterate.  timeout_usecs >= 0 bounds the
   wait; iterate does not loop since timeout_usecs != -1. */
501 SilcBool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
504 SILC_SCHEDULE_LOCK(schedule);
505 ret = silc_schedule_iterate(schedule, timeout_usecs);
506 SILC_SCHEDULE_UNLOCK(schedule);
510 /* Runs the scheduler and blocks here. When this returns the scheduler
/* Main blocking entry point: iterate with -1 loops until the scheduler
   is stopped (silc_schedule_stop clears schedule->valid).
   NOTE(review): line 523's #endif pairs with an #ifdef/#ifndef
   SILC_SYMBIAN opened on a line not visible in this extraction. */
514 void silc_schedule(SilcSchedule schedule)
516 SILC_LOG_DEBUG(("Running scheduler"));
518 /* Start the scheduler loop */
519 SILC_SCHEDULE_LOCK(schedule);
520 silc_schedule_iterate(schedule, -1);
521 SILC_SCHEDULE_UNLOCK(schedule);
523 #endif /* !SILC_SYMBIAN */
525 /* Wakes up the scheduler. This is used only in multi-threaded
526 environments where threads may add new tasks or remove old tasks
527 from task queues. This is called to wake up the scheduler in the
528 main thread so that it detects the changes in the task queues.
529 If threads support is not compiled in this function has no effect.
530 Implementation of this function is platform specific. */
/* Thin locked wrapper over the platform wakeup hook. */
532 void silc_schedule_wakeup(SilcSchedule schedule)
535 SILC_LOG_DEBUG(("Wakeup scheduler"));
536 SILC_SCHEDULE_LOCK(schedule);
537 schedule_ops.wakeup(schedule, schedule->internal);
538 SILC_SCHEDULE_UNLOCK(schedule);
542 /* Returns the application specific context that was saved into the
543 scheduler in silc_schedule_init function. The context is also
544 returned to application in task callback functions, but this function
545 may be used to get it as well if needed. */
/* Read-only accessor; no locking needed for this immutable pointer. */
547 void *silc_schedule_get_context(SilcSchedule schedule)
549 return schedule->app_context;
552 /* Set notify callback */
/* Registers an observer invoked on every task add/delete (see the
   schedule->notify call sites below); NULL disables notification.
   NOTE(review): no locking visible here -- presumably callers set this
   before the scheduler runs; confirm against API docs. */
554 void silc_schedule_set_notify(SilcSchedule schedule,
555 SilcTaskNotifyCb notify, void *context)
557 schedule->notify = notify;
558 schedule->notify_context = context;
561 /* Add new task to the scheduler */
/* Core registration routine for all task types (timeout / fd / signal).
   NOTE(review): many interior lines are missing from this extraction
   (the `type` parameter ending the signature, locals `list`/`ftask`,
   error-path unlocks/returns, `break` after sorted insert, timeout
   values in the debug log, signal_register arguments, final return).
   Code kept byte-identical; comments only added. */
563 SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
564 SilcTaskCallback callback, void *context,
565 long seconds, long useconds,
568 SilcTask task = NULL;
570 if (silc_unlikely(!schedule->valid))
573 SILC_SCHEDULE_LOCK(schedule);
575 if (silc_likely(type == SILC_TASK_TIMEOUT)) {
576 SilcTaskTimeout tmp, prev, ttask;
/* Reuse a recycled task from the freelist; allocate only when empty. */
579 silc_list_start(schedule->free_tasks);
580 ttask = silc_list_get(schedule->free_tasks);
581 if (silc_unlikely(!ttask)) {
582 ttask = silc_calloc(1, sizeof(*ttask));
583 if (silc_unlikely(!ttask))
586 silc_list_del(schedule->free_tasks, ttask);
/* type 1 == timeout task (checked in task_remove and the Symbian path). */
588 ttask->header.type = 1;
589 ttask->header.callback = callback;
590 ttask->header.context = context;
591 ttask->header.valid = TRUE;
/* Absolute expiry = now + seconds/useconds, normalizing usec overflow. */
594 silc_gettimeofday(&ttask->timeout);
595 if ((seconds + useconds) > 0) {
596 ttask->timeout.tv_sec += seconds + (useconds / 1000000L);
597 ttask->timeout.tv_usec += (useconds % 1000000L);
598 if (ttask->timeout.tv_usec >= 1000000L) {
599 ttask->timeout.tv_sec += 1;
600 ttask->timeout.tv_usec -= 1000000L;
604 SILC_LOG_DEBUG(("New timeout task %p: sec=%ld, usec=%ld", ttask,
607 /* Add task to correct spot so that the first task in the list has
608 the earliest timeout. */
609 list = schedule->timeout_queue;
610 silc_list_start(list);
612 while ((tmp = silc_list_get(list)) != SILC_LIST_END) {
613 /* If we have shorter timeout, we have found our spot */
614 if (silc_compare_timeval(&ttask->timeout, &tmp->timeout) < 0) {
615 silc_list_insert(schedule->timeout_queue, prev, ttask);
/* Fell off the end: latest timeout so far, append at the tail. */
621 silc_list_add(schedule->timeout_queue, ttask);
623 task = (SilcTask)ttask;
625 /* Call notify callback */
626 if (schedule->notify)
627 schedule->notify(schedule, TRUE, task, FALSE, 0, 0, seconds, useconds,
628 schedule->notify_context);
630 } else if (silc_likely(type == SILC_TASK_FD)) {
633 /* Check if fd is already added */
634 if (silc_unlikely(silc_hash_table_find(schedule->fd_queue,
636 NULL, (void *)&task))) {
640 /* Remove invalid task. We must have unique fd key to hash table. */
641 silc_schedule_task_remove(schedule, task);
644 /* Check max tasks */
645 if (silc_unlikely(schedule->max_tasks > 0 &&
646 silc_hash_table_count(schedule->fd_queue) >=
647 schedule->max_tasks)) {
648 SILC_LOG_WARNING(("Scheduler task limit reached: cannot add new task"));
653 ftask = silc_calloc(1, sizeof(*ftask));
654 if (silc_unlikely(!ftask)) {
659 SILC_LOG_DEBUG(("New fd task %p fd=%d", ftask, fd));
/* type 0 == fd task; new fd tasks start listening for READ only. */
661 ftask->header.type = 0;
662 ftask->header.callback = callback;
663 ftask->header.context = context;
664 ftask->header.valid = TRUE;
665 ftask->events = SILC_TASK_READ;
668 /* Add task and schedule it */
669 if (!silc_hash_table_add(schedule->fd_queue, SILC_32_TO_PTR(fd), ftask)) {
/* Roll back the hash-table entry if the platform layer rejects the fd. */
674 if (!schedule_ops.schedule_fd(schedule, schedule->internal,
675 ftask, ftask->events)) {
676 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(fd));
681 task = (SilcTask)ftask;
683 /* Call notify callback */
684 if (schedule->notify)
685 schedule->notify(schedule, TRUE, task, TRUE, ftask->fd,
686 SILC_TASK_READ, 0, 0, schedule->notify_context);
688 } else if (silc_unlikely(type == SILC_TASK_SIGNAL)) {
/* Signal tasks are delegated unlocked to the platform layer. */
689 SILC_SCHEDULE_UNLOCK(schedule);
690 schedule_ops.signal_register(schedule, schedule->internal, fd,
696 SILC_SCHEDULE_UNLOCK(schedule);
699 /* On symbian we wakeup scheduler immediately after adding timeout task
700 in case the task is added outside the scheduler loop (in some active
702 if (task && task->type == 1)
703 silc_schedule_wakeup(schedule);
704 #endif /* SILC_SYMBIAN */
709 /* Invalidates task */
/* Marks task(s) invalid so the dispatchers remove them later; does not
   free anything here (physical removal happens in task_remove).
   NOTE(review): interior lines missing (the `valid = FALSE` assignments
   implied by the surviving notify calls, returns, braces).
   Code kept byte-identical; comments only added. */
711 SilcBool silc_schedule_task_del(SilcSchedule schedule, SilcTask task)
713 if (silc_unlikely(task == SILC_ALL_TASKS)) {
714 SilcHashTableList htl;
716 SILC_LOG_DEBUG(("Unregister all tasks"));
718 SILC_SCHEDULE_LOCK(schedule);
720 /* Delete from fd queue */
721 silc_hash_table_list(schedule->fd_queue, &htl);
722 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
725 /* Call notify callback */
726 if (schedule->notify)
727 schedule->notify(schedule, FALSE, task, TRUE,
728 ((SilcTaskFd)task)->fd, 0, 0, 0,
729 schedule->notify_context);
731 silc_hash_table_list_reset(&htl);
733 /* Delete from timeout queue */
734 silc_list_start(schedule->timeout_queue);
735 while ((task = (SilcTask)silc_list_get(schedule->timeout_queue))) {
738 /* Call notify callback */
739 if (schedule->notify)
740 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
741 schedule->notify_context);
744 SILC_SCHEDULE_UNLOCK(schedule);
/* Single-task path. */
748 SILC_LOG_DEBUG(("Unregistering task %p", task));
749 SILC_SCHEDULE_LOCK(schedule);
752 /* Call notify callback */
/* !task->type: TRUE for fd tasks (type 0) in the notify fd flag slot. */
753 if (schedule->notify)
754 schedule->notify(schedule, FALSE, task, !task->type, 0, 0, 0, 0,
755 schedule->notify_context);
756 SILC_SCHEDULE_UNLOCK(schedule);
761 /* Invalidate task by fd */
/* fd keys are unique in the hash table, so at most one task matches.
   NOTE(review): interior lines missing (the invalidation of the found
   task, `ret = TRUE`, braces, final return). Code kept byte-identical. */
763 SilcBool silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
765 SilcTask task = NULL;
766 SilcBool ret = FALSE;
768 SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
770 SILC_SCHEDULE_LOCK(schedule);
772 /* fd is unique, so there is only one task with this fd in the table */
773 if (silc_likely(silc_hash_table_find(schedule->fd_queue,
774 SILC_32_TO_PTR(fd), NULL,
776 SILC_LOG_DEBUG(("Deleting task %p", task));
779 /* Call notify callback */
780 if (schedule->notify)
781 schedule->notify(schedule, FALSE, task, TRUE, fd, 0, 0, 0,
782 schedule->notify_context);
786 SILC_SCHEDULE_UNLOCK(schedule);
788 /* If it is signal, remove it */
/* Not in the fd queue: the fd may actually be a signal number, so give
   the platform signal layer a chance to unregister it. */
789 if (silc_unlikely(!task)) {
790 schedule_ops.signal_unregister(schedule, schedule->internal, fd);
797 /* Invalidate task by task callback. */
/* Invalidates every fd and timeout task registered with `callback`.
   NOTE(review): interior lines missing (local `task`, the invalidation
   statements, `ret = TRUE`, braces, final return).
   Code kept byte-identical; comments only added. */
799 SilcBool silc_schedule_task_del_by_callback(SilcSchedule schedule,
800 SilcTaskCallback callback)
803 SilcHashTableList htl;
805 SilcBool ret = FALSE;
807 SILC_LOG_DEBUG(("Unregister task by callback"));
809 SILC_SCHEDULE_LOCK(schedule);
811 /* Delete from fd queue */
812 silc_hash_table_list(schedule->fd_queue, &htl);
813 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
814 if (task->callback == callback) {
817 /* Call notify callback */
818 if (schedule->notify)
819 schedule->notify(schedule, FALSE, task, TRUE,
820 ((SilcTaskFd)task)->fd, 0, 0, 0,
821 schedule->notify_context);
825 silc_hash_table_list_reset(&htl);
827 /* Delete from timeout queue */
828 list = schedule->timeout_queue;
829 silc_list_start(list);
830 while ((task = (SilcTask)silc_list_get(list))) {
831 if (task->callback == callback) {
834 /* Call notify callback */
835 if (schedule->notify)
836 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
837 schedule->notify_context);
842 SILC_SCHEDULE_UNLOCK(schedule);
847 /* Invalidate task by context. */
/* Invalidates every fd and timeout task registered with `context`;
   mirrors del_by_callback with the match on task->context instead.
   NOTE(review): interior lines missing (the `context` parameter line,
   local `task`, invalidation statements, final return).
   Code kept byte-identical; comments only added. */
849 SilcBool silc_schedule_task_del_by_context(SilcSchedule schedule,
853 SilcHashTableList htl;
855 SilcBool ret = FALSE;
857 SILC_LOG_DEBUG(("Unregister task by context"));
859 SILC_SCHEDULE_LOCK(schedule);
861 /* Delete from fd queue */
862 silc_hash_table_list(schedule->fd_queue, &htl);
863 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
864 if (task->context == context) {
867 /* Call notify callback */
868 if (schedule->notify)
869 schedule->notify(schedule, FALSE, task, TRUE,
870 ((SilcTaskFd)task)->fd, 0, 0, 0,
871 schedule->notify_context);
875 silc_hash_table_list_reset(&htl);
877 /* Delete from timeout queue */
878 list = schedule->timeout_queue;
879 silc_list_start(list);
880 while ((task = (SilcTask)silc_list_get(list))) {
881 if (task->context == context) {
884 /* Call notify callback */
885 if (schedule->notify)
886 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
887 schedule->notify_context);
892 SILC_SCHEDULE_UNLOCK(schedule);
897 /* Invalidate task by all */
/* Match on fd AND callback AND context. For fd tasks the fd alone is
   unique, so the early delegation to del_by_fd suffices; only timeout
   tasks need the callback+context scan.
   NOTE(review): interior lines missing (the `context` parameter and
   locals, the `fd != 0`-style guard before line 911, invalidation
   statements, final return). Code kept byte-identical. */
899 SilcBool silc_schedule_task_del_by_all(SilcSchedule schedule, int fd,
900 SilcTaskCallback callback,
905 SilcBool ret = FALSE;
907 SILC_LOG_DEBUG(("Unregister task by fd, callback and context"));
909 /* For fd task, callback and context is irrelevant as fd is unique */
911 return silc_schedule_task_del_by_fd(schedule, fd);
913 SILC_SCHEDULE_LOCK(schedule);
915 /* Delete from timeout queue */
916 list = schedule->timeout_queue;
917 silc_list_start(list);
918 while ((task = (SilcTask)silc_list_get(list))) {
919 if (task->callback == callback && task->context == context) {
922 /* Call notify callback */
923 if (schedule->notify)
924 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
925 schedule->notify_context);
930 SILC_SCHEDULE_UNLOCK(schedule);
935 /* Sets a file descriptor to be listened by scheduler. One can call this
936 directly if wanted. This can be called multiple times for one file
937 descriptor to set different iomasks. */
/* NOTE(review): interior lines missing (local `task`, returns including
   the FALSE after the unlock on failure, braces, final return).
   Code kept byte-identical; comments only added. */
939 SilcBool silc_schedule_set_listen_fd(SilcSchedule schedule, SilcUInt32 fd,
940 SilcTaskEvent mask, SilcBool send_events)
944 if (silc_unlikely(!schedule->valid))
947 SILC_SCHEDULE_LOCK(schedule);
949 if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
950 NULL, (void *)&task)) {
951 if (!schedule_ops.schedule_fd(schedule, schedule->internal, task, mask)) {
952 SILC_SCHEDULE_UNLOCK(schedule);
/* send_events: synthesize the events immediately (sets revents and runs
   the fd dispatcher) instead of waiting for real fd activity. */
956 if (silc_unlikely(send_events) && mask) {
957 task->revents = mask;
958 silc_schedule_dispatch_fd(schedule);
961 /* Call notify callback */
962 if (schedule->notify)
963 schedule->notify(schedule, TRUE, (SilcTask)task,
964 TRUE, task->fd, mask, 0, 0,
965 schedule->notify_context);
968 SILC_SCHEDULE_UNLOCK(schedule);
973 /* Returns the file descriptor's current requested event mask. */
/* Returns 0 when the scheduler is invalid or the fd is not registered.
   NOTE(review): the `fd` parameter line and final `return event;` are
   missing from this extraction. Code kept byte-identical. */
975 SilcTaskEvent silc_schedule_get_fd_events(SilcSchedule schedule,
979 SilcTaskEvent event = 0;
981 if (silc_unlikely(!schedule->valid))
984 SILC_SCHEDULE_LOCK(schedule);
985 if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
986 NULL, (void *)&task))
987 event = task->events;
988 SILC_SCHEDULE_UNLOCK(schedule);
993 /* Removes a file descriptor from listen list. */
/* Convenience wrapper: an empty event mask (0) stops listening on fd. */
995 void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
997 silc_schedule_set_listen_fd(schedule, fd, 0, FALSE);