5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 1998 - 2007 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
23 /************************** Types and definitions ***************************/
25 /* Platform specific implementation */
26 extern const SilcScheduleOps schedule_ops;
28 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task);
29 static void silc_schedule_dispatch_fd(SilcSchedule schedule);
30 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
31 SilcBool dispatch_all);
34 /************************ Static utility functions **************************/
36 /* Fd task hash table destructor */
38 static void silc_schedule_fd_destructor(void *key, void *context,
44 /* Executes file descriptor tasks. Invalid tasks are removed here. */
46 static void silc_schedule_dispatch_fd(SilcSchedule schedule)
/* NOTE(review): entered with the scheduler lock held; the lock is dropped
   while user callbacks run and re-acquired before the invalidated-task
   sweep.  Several original lines (declarations, braces) are elided from
   this view -- verify against the full source. */
51 /* The dispatch list includes only valid tasks, and tasks that have
52 something to dispatch. Dispatching is atomic; no matter if another
53 thread invalidates a task when we unlock, we dispatch to completion. */
54 SILC_SCHEDULE_UNLOCK(schedule);
55 silc_list_start(schedule->fd_dispatch);
56 while ((task = silc_list_get(schedule->fd_dispatch))) {
59 /* Is the task ready for reading */
60 if (task->revents & SILC_TASK_READ)
61 t->callback(schedule, schedule->app_context, SILC_TASK_READ,
62 task->fd, t->context);
/* The t->valid re-check matters: the READ callback above may have
   invalidated the task, in which case the WRITE delivery is skipped. */
64 /* Is the task ready for writing */
65 if (t->valid && task->revents & SILC_TASK_WRITE)
66 t->callback(schedule, schedule->app_context, SILC_TASK_WRITE,
67 task->fd, t->context);
69 SILC_SCHEDULE_LOCK(schedule);
/* Second pass, under the lock: hard-remove tasks invalidated during
   dispatch (header.valid cleared by silc_schedule_task_del). */
71 /* Remove invalidated tasks */
72 silc_list_start(schedule->fd_dispatch);
73 while ((task = silc_list_get(schedule->fd_dispatch)))
74 if (silc_unlikely(!task->header.valid))
75 silc_schedule_task_remove(schedule, (SilcTask)task);
78 /* Executes all tasks whose timeout has expired. The task is removed from
79 the task queue after the callback function has returned. Also, invalid
80 tasks are removed here. */
82 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
83 SilcBool dispatch_all)
/* NOTE(review): runs with the scheduler lock held; unlocks around each
   user callback.  `dispatch_all' forces even not-yet-expired tasks to
   run (used from silc_schedule_uninit).  Some original lines are elided
   from this view (loop braces, `t' assignment, continue/return). */
87 struct timeval curtime;
90 SILC_LOG_DEBUG(("Running timeout tasks"));
92 silc_gettimeofday(&curtime);
94 /* First task in the task queue has always the earliest timeout. */
95 silc_list_start(schedule->timeout_queue);
96 task = silc_list_get(schedule->timeout_queue);
97 if (silc_unlikely(!task))
102 /* Remove invalid task */
103 if (silc_unlikely(!t->valid)) {
104 silc_schedule_task_remove(schedule, t);
/* Queue is sorted by expiry, so the first non-expired task ends the
   sweep (unless dispatch_all forces everything to run). */
108 /* Execute the task if the timeout has expired */
109 if (silc_compare_timeval(&task->timeout, &curtime) > 0 && !dispatch_all)
113 SILC_SCHEDULE_UNLOCK(schedule);
114 t->callback(schedule, schedule->app_context, SILC_TASK_EXPIRE, 0,
116 SILC_SCHEDULE_LOCK(schedule);
118 /* Remove the expired task */
119 silc_schedule_task_remove(schedule, t);
/* presumably breaks out after ~40 dispatches so fd tasks are not
   starved by a flood of short timeouts -- the break is elided from
   this view; confirm against full source. */
121 /* Balance when we have lots of small timeouts */
122 if (silc_unlikely((++count) > 40))
124 } while (silc_likely((task = silc_list_get(schedule->timeout_queue))));
127 /* Calculates next timeout. This is the timeout value when at earliest some
128 of the timeout tasks expire. If this is in the past, they will be
131 static void silc_schedule_select_timeout(SilcSchedule schedule)
/* Computes schedule->timeout / schedule->has_timeout for the next
   platform select/poll call.  Already-expired tasks are dispatched
   first, then the earliest remaining expiry is converted to a relative
   timeval.  Caller holds the scheduler lock. */
134 SilcTaskTimeout task;
135 struct timeval curtime;
136 SilcBool dispatch = TRUE;
138 /* Get the current time */
139 silc_gettimeofday(&curtime);
140 schedule->has_timeout = FALSE;
142 /* First task in the task queue has always the earliest timeout. */
143 silc_list_start(schedule->timeout_queue);
144 task = silc_list_get(schedule->timeout_queue);
145 if (silc_unlikely(!task))
150 /* Remove invalid task */
151 if (silc_unlikely(!t->valid)) {
152 silc_schedule_task_remove(schedule, t);
156 /* If the timeout is in past, we will run the task and all other
157 timeout tasks from the past. */
158 if (silc_compare_timeval(&task->timeout, &curtime) <= 0 && dispatch) {
159 silc_schedule_dispatch_timeout(schedule, FALSE);
160 if (silc_unlikely(!schedule->valid))
163 /* Start selecting new timeout again after dispatch */
164 silc_list_start(schedule->timeout_queue);
/* presumably `dispatch' is cleared here (line elided) so the expired
   batch is dispatched at most once per call -- confirm. */
/* Relative timeout = task expiry minus now; negative results are
   clamped (clamp lines elided from this view). */
169 /* Calculate the next timeout */
170 curtime.tv_sec = task->timeout.tv_sec - curtime.tv_sec;
171 curtime.tv_usec = task->timeout.tv_usec - curtime.tv_usec;
172 if (curtime.tv_sec < 0)
175 /* We wouldn't want to go under zero, check for it. */
176 if (curtime.tv_usec < 0) {
178 if (curtime.tv_sec < 0)
180 curtime.tv_usec += 1000000L;
183 } while ((task = silc_list_get(schedule->timeout_queue)));
185 /* Save the timeout */
187 schedule->timeout = curtime;
188 schedule->has_timeout = TRUE;
189 SILC_LOG_DEBUG(("timeout: sec=%d, usec=%d", schedule->timeout.tv_sec,
190 schedule->timeout.tv_usec));
194 /* Removes task from the scheduler. This must be called with scheduler
197 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task)
/* Hard-removes a task (or SILC_ALL_TASKS) from the fd hash table and/or
   the timeout queue.  Caller must hold the scheduler lock.  Removed
   timeout tasks are recycled via the free_tasks list rather than freed. */
201 if (silc_unlikely(task == SILC_ALL_TASKS)) {
203 SilcHashTableList htl;
206 /* Delete from fd queue */
207 silc_hash_table_list(schedule->fd_queue, &htl);
208 while (silc_hash_table_get(&htl, (void *)&fd, (void *)&task))
209 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(fd));
210 silc_hash_table_list_reset(&htl);
212 /* Delete from timeout queue */
213 silc_list_start(schedule->timeout_queue);
214 while ((task = silc_list_get(schedule->timeout_queue))) {
215 silc_list_del(schedule->timeout_queue, task);
/* Single-task case: header.type == 1 marks a timeout task, 0 an fd
   task (set in silc_schedule_task_add). */
222 if (silc_likely(task->type == 1)) {
223 /* Delete from timeout queue */
224 silc_list_del(schedule->timeout_queue, task);
226 /* Put to free list */
227 silc_list_add(schedule->free_tasks, task);
229 /* Delete from fd queue */
230 ftask = (SilcTaskFd)task;
231 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(ftask->fd));
235 /* Timeout freelist garbage collection */
237 SILC_TASK_CALLBACK(silc_schedule_timeout_gc)
/* Periodic self-rearming task: frees surplus entries from the timeout
   task freelist while keeping a floor of ~10 spares and never dropping
   below the number of live timeout tasks.  The rearm interval constants
   are elided from this view. */
242 if (!schedule->valid)
245 SILC_LOG_DEBUG(("Timeout freelist garbage collection"));
247 SILC_SCHEDULE_LOCK(schedule);
/* Small freelist: nothing worth collecting, just rearm the timer. */
249 if (silc_list_count(schedule->free_tasks) <= 10) {
250 SILC_SCHEDULE_UNLOCK(schedule);
251 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* More live timeouts than spares: freelist will be drained soon anyway. */
255 if (silc_list_count(schedule->timeout_queue) >
256 silc_list_count(schedule->free_tasks)) {
257 SILC_SCHEDULE_UNLOCK(schedule);
258 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* Free half the list, adjusted so that at least the live-task count and
   the floor of 10 entries remain. */
263 c = silc_list_count(schedule->free_tasks) / 2;
264 if (c > silc_list_count(schedule->timeout_queue))
265 c = (silc_list_count(schedule->free_tasks) -
266 silc_list_count(schedule->timeout_queue));
267 if (silc_list_count(schedule->free_tasks) - c < 10)
268 c -= (10 - (silc_list_count(schedule->free_tasks) - c));
270 SILC_LOG_DEBUG(("Freeing %d unused tasks, leaving %d", c,
271 silc_list_count(schedule->free_tasks) - c));
273 silc_list_start(schedule->free_tasks);
274 while ((t = silc_list_get(schedule->free_tasks)) && c-- > 0) {
275 silc_list_del(schedule->free_tasks, t);
278 silc_list_start(schedule->free_tasks);
280 SILC_SCHEDULE_UNLOCK(schedule);
282 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
286 #ifdef SILC_DIST_INPLACE
287 /* Print schedule statistics to stdout */
289 void silc_schedule_stats(SilcSchedule schedule)
/* Debug-only (SILC_DIST_INPLACE builds) dump of task counts and rough
   per-queue memory usage to stdout.
   NOTE(review): the %lu/%d specifiers are paired with size_t-valued
   sizeof products; on LP64 platforms %zu would be the correct specifier
   -- flagging rather than changing, since surrounding lines are elided. */
292 fprintf(stdout, "Schedule %p statistics:\n\n", schedule);
293 fprintf(stdout, "Num FD tasks : %lu (%lu bytes allocated)\n",
294 silc_hash_table_count(schedule->fd_queue),
295 sizeof(*ftask) * silc_hash_table_count(schedule->fd_queue));
296 fprintf(stdout, "Num Timeout tasks : %d (%d bytes allocated)\n",
297 silc_list_count(schedule->timeout_queue),
298 sizeof(struct SilcTaskTimeoutStruct) *
299 silc_list_count(schedule->timeout_queue));
300 fprintf(stdout, "Num Timeout freelist : %d (%d bytes allocated)\n",
301 silc_list_count(schedule->free_tasks),
302 sizeof(struct SilcTaskTimeoutStruct) *
303 silc_list_count(schedule->free_tasks));
305 #endif /* SILC_DIST_INPLACE */
307 /****************************** Public API **********************************/
309 /* Initializes the scheduler. This returns the scheduler context that
310 is given as argument usually to all silc_schedule_* functions.
311 The `max_tasks' indicates the number of maximum tasks that the
312 scheduler can handle. The `app_context' is application specific
313 context that is delivered to task callbacks. */
315 SilcSchedule silc_schedule_init(int max_tasks, void *app_context)
317 SilcSchedule schedule;
319 SILC_LOG_DEBUG(("Initializing scheduler"));
/* NOTE(review): the calloc NULL check and the assignment of the hash
   table into schedule->fd_queue are on lines elided from this view. */
321 schedule = silc_calloc(1, sizeof(*schedule));
/* fd -> task hash table; silc_schedule_fd_destructor releases entries. */
326 silc_hash_table_alloc(0, silc_hash_uint, NULL, NULL, NULL,
327 silc_schedule_fd_destructor, NULL, TRUE);
328 if (!schedule->fd_queue) {
333 silc_list_init(schedule->timeout_queue, struct SilcTaskStruct, next);
334 silc_list_init(schedule->free_tasks, struct SilcTaskStruct, next);
336 schedule->app_context = app_context;
337 schedule->valid = TRUE;
338 schedule->max_tasks = max_tasks;
340 /* Allocate scheduler lock */
341 silc_mutex_alloc(&schedule->lock);
343 /* Initialize the platform specific scheduler. */
344 schedule->internal = schedule_ops.init(schedule, app_context);
/* Platform init failure: roll back everything allocated above. */
345 if (!schedule->internal) {
346 silc_hash_table_free(schedule->fd_queue);
347 silc_mutex_free(schedule->lock);
/* Arm the freelist GC task (interval constants elided from this view). */
352 /* Timeout freelist garbage collection */
353 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
359 /* Uninitializes the schedule. This is called when the program is ready
360 to end. This removes all tasks and task queues. Returns FALSE if the
361 scheduler could not be uninitialized. This happens when the scheduler
362 is still valid and silc_schedule_stop has not been called. */
364 SilcBool silc_schedule_uninit(SilcSchedule schedule)
368 SILC_LOG_DEBUG(("Uninitializing scheduler"));
/* Refuse to tear down a still-running scheduler; silc_schedule_stop
   must have cleared schedule->valid first. */
370 if (schedule->valid == TRUE)
373 /* Dispatch all timeouts before going away */
374 SILC_SCHEDULE_LOCK(schedule);
375 silc_schedule_dispatch_timeout(schedule, TRUE);
376 SILC_SCHEDULE_UNLOCK(schedule);
378 /* Deliver signals before going away */
379 if (schedule->signal_tasks) {
380 schedule_ops.signals_call(schedule, schedule->internal);
381 schedule->signal_tasks = FALSE;
/* NOTE(review): silc_schedule_task_remove is documented to require the
   scheduler lock, but is called unlocked here -- presumably safe because
   shutdown is single-threaded; confirm. */
384 /* Unregister all tasks */
385 silc_schedule_task_del(schedule, SILC_ALL_TASKS);
386 silc_schedule_task_remove(schedule, SILC_ALL_TASKS);
388 /* Delete timeout task freelist */
389 silc_list_start(schedule->free_tasks);
390 while ((task = silc_list_get(schedule->free_tasks)))
393 /* Unregister all task queues */
394 silc_hash_table_free(schedule->fd_queue);
396 /* Uninit the platform specific scheduler. */
397 schedule_ops.uninit(schedule, schedule->internal);
399 silc_mutex_free(schedule->lock);
405 /* Stops the schedule even if it is not supposed to be stopped yet.
406 After calling this, one should call silc_schedule_uninit (after the
407 silc_schedule has returned). */
409 void silc_schedule_stop(SilcSchedule schedule)
/* Clears the valid flag under the lock; the scheduler loop observes it
   on its next iteration and exits.  Does not wake a blocked select --
   see silc_schedule_wakeup for that. */
411 SILC_LOG_DEBUG(("Stopping scheduler"));
412 SILC_SCHEDULE_LOCK(schedule);
413 schedule->valid = FALSE;
414 SILC_SCHEDULE_UNLOCK(schedule);
417 /* Runs the scheduler once and then returns. Must be called locked. */
419 static SilcBool silc_schedule_iterate(SilcSchedule schedule, int timeout_usecs)
/* Core scheduler loop.  timeout_usecs >= 0 runs a single bounded
   iteration; -1 loops until the scheduler is stopped (see the while at
   the bottom).  Caller holds the scheduler lock; it is dropped around
   signal delivery and inside the dispatch helpers.  Return paths and
   some braces are elided from this view. */
421 struct timeval timeout;
425 SILC_LOG_DEBUG(("In scheduler loop"));
427 /* Deliver signals if any has been set to be called */
428 if (silc_unlikely(schedule->signal_tasks)) {
429 SILC_SCHEDULE_UNLOCK(schedule);
430 schedule_ops.signals_call(schedule, schedule->internal);
431 schedule->signal_tasks = FALSE;
432 SILC_SCHEDULE_LOCK(schedule);
435 /* Check if scheduler is valid */
436 if (silc_unlikely(schedule->valid == FALSE)) {
437 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
441 /* Calculate next timeout for silc_select(). This is the timeout value
442 when at earliest some of the timeout tasks expire. This may dispatch
443 already expired timeouts. */
444 silc_schedule_select_timeout(schedule);
/* Re-check: dispatched timeout callbacks may have stopped the scheduler. */
446 /* Check if scheduler is valid */
447 if (silc_unlikely(schedule->valid == FALSE)) {
448 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
/* Caller-supplied timeout overrides the computed one (tv_sec line
   elided from this view). */
452 if (timeout_usecs >= 0) {
454 timeout.tv_usec = timeout_usecs;
455 schedule->timeout = timeout;
456 schedule->has_timeout = TRUE;
459 /* This is the main silc_select(). The program blocks here until some
460 of the selected file descriptors change status or the selected
462 SILC_LOG_DEBUG(("Select"));
463 ret = schedule_ops.schedule(schedule, schedule->internal);
/* ret == 0: timed out with no fd activity -- run expired timeouts. */
465 if (silc_likely(ret == 0)) {
467 SILC_LOG_DEBUG(("Running timeout tasks"));
468 if (silc_likely(silc_list_count(schedule->timeout_queue)))
469 silc_schedule_dispatch_timeout(schedule, FALSE);
472 } else if (silc_likely(ret > 0)) {
473 /* There is some data available now */
474 SILC_LOG_DEBUG(("Running fd tasks"));
475 silc_schedule_dispatch_fd(schedule);
/* Sub-50ms pending timeout: cheaper to dispatch now than to loop
   through select again. */
477 /* If timeout was very short, dispatch also timeout tasks */
478 if (schedule->has_timeout && schedule->timeout.tv_sec == 0 &&
479 schedule->timeout.tv_usec < 50000)
480 silc_schedule_dispatch_timeout(schedule, FALSE);
484 /* Error or special case handling */
490 SILC_LOG_ERROR(("Error in select()/poll(): %s", strerror(errno)));
493 } while (timeout_usecs == -1);
498 /* Runs the scheduler once and then returns. */
500 SilcBool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
/* Single bounded iteration: takes the lock and delegates to
   silc_schedule_iterate with the caller's timeout. */
503 SILC_SCHEDULE_LOCK(schedule);
504 ret = silc_schedule_iterate(schedule, timeout_usecs);
505 SILC_SCHEDULE_UNLOCK(schedule);
509 /* Runs the scheduler and blocks here. When this returns the scheduler
513 void silc_schedule(SilcSchedule schedule)
/* Blocking entry point: iterate(-1) loops until silc_schedule_stop
   clears schedule->valid. */
515 SILC_LOG_DEBUG(("Running scheduler"));
517 /* Start the scheduler loop */
518 SILC_SCHEDULE_LOCK(schedule);
519 silc_schedule_iterate(schedule, -1);
520 SILC_SCHEDULE_UNLOCK(schedule);
522 #endif /* !SILC_SYMBIAN */
524 /* Wakes up the scheduler. This is used only in multi-threaded
525 environments where threads may add new tasks or remove old tasks
526 from task queues. This is called to wake up the scheduler in the
527 main thread so that it detects the changes in the task queues.
528 If threads support is not compiled in this function has no effect.
529 Implementation of this function is platform specific. */
531 void silc_schedule_wakeup(SilcSchedule schedule)
/* Delegates to the platform wakeup op under the scheduler lock. */
534 SILC_LOG_DEBUG(("Wakeup scheduler"));
535 SILC_SCHEDULE_LOCK(schedule);
536 schedule_ops.wakeup(schedule, schedule->internal);
537 SILC_SCHEDULE_UNLOCK(schedule);
541 /* Returns the application specific context that was saved into the
542 scheduler in silc_schedule_init function. The context is also
543 returned to application in task callback functions, but this function
544 may be used to get it as well if needed. */
546 void *silc_schedule_get_context(SilcSchedule schedule)
/* Plain accessor; no locking needed for a pointer set once at init. */
548 return schedule->app_context;
551 /* Set notify callback */
553 void silc_schedule_set_notify(SilcSchedule schedule,
554 SilcTaskNotifyCb notify, void *context)
/* Installs the task add/del notification callback.  NOTE(review): the
   two writes are not performed under the scheduler lock -- presumably
   intended to be called before the scheduler runs; confirm. */
556 schedule->notify = notify;
557 schedule->notify_context = context;
560 /* Add new task to the scheduler */
562 SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
563 SilcTaskCallback callback, void *context,
564 long seconds, long useconds,
/* Registers a timeout, fd, or signal task depending on `type' (the
   parameter line is elided from this view).  Returns the new task or
   NULL on failure.  Timeout tasks are recycled from free_tasks when
   possible; fd tasks are keyed by unique fd in fd_queue. */
567 SilcTask task = NULL;
569 if (silc_unlikely(!schedule->valid))
572 SILC_SCHEDULE_LOCK(schedule);
574 if (silc_likely(type == SILC_TASK_TIMEOUT)) {
575 SilcTaskTimeout tmp, prev, ttask;
/* Reuse a recycled task context if the freelist has one, otherwise
   allocate fresh. */
578 silc_list_start(schedule->free_tasks);
579 ttask = silc_list_get(schedule->free_tasks);
580 if (silc_unlikely(!ttask)) {
581 ttask = silc_calloc(1, sizeof(*ttask));
582 if (silc_unlikely(!ttask))
585 silc_list_del(schedule->free_tasks, ttask);
/* type 1 == timeout task (see silc_schedule_task_remove). */
587 ttask->header.type = 1;
588 ttask->header.callback = callback;
589 ttask->header.context = context;
590 ttask->header.valid = TRUE;
/* Absolute expiry = now + requested relative timeout, with usec
   overflow normalized into seconds. */
593 silc_gettimeofday(&ttask->timeout);
594 if ((seconds + useconds) > 0) {
595 ttask->timeout.tv_sec += seconds + (useconds / 1000000L);
596 ttask->timeout.tv_usec += (useconds % 1000000L);
597 if (ttask->timeout.tv_usec >= 1000000L) {
598 ttask->timeout.tv_sec += 1;
599 ttask->timeout.tv_usec -= 1000000L;
603 SILC_LOG_DEBUG(("New timeout task %p: sec=%d, usec=%d", ttask,
606 /* Add task to correct spot so that the first task in the list has
607 the earliest timeout. */
608 list = schedule->timeout_queue;
609 silc_list_start(list);
611 while ((tmp = silc_list_get(list)) != SILC_LIST_END) {
612 /* If we have shorter timeout, we have found our spot */
613 if (silc_compare_timeval(&ttask->timeout, &tmp->timeout) < 0) {
614 silc_list_insert(schedule->timeout_queue, prev, ttask);
/* No earlier slot found: append as the latest timeout. */
620 silc_list_add(schedule->timeout_queue, ttask);
622 task = (SilcTask)ttask;
624 /* Call notify callback */
625 if (schedule->notify)
626 schedule->notify(schedule, TRUE, task, FALSE, 0, 0, seconds, useconds,
627 schedule->notify_context);
629 } else if (silc_likely(type == SILC_TASK_FD)) {
632 /* Check if fd is already added */
633 if (silc_unlikely(silc_hash_table_find(schedule->fd_queue,
635 NULL, (void *)&task))) {
639 /* Remove invalid task. We must have unique fd key to hash table. */
640 silc_schedule_task_remove(schedule, task);
643 /* Check max tasks */
644 if (silc_unlikely(schedule->max_tasks > 0 &&
645 silc_hash_table_count(schedule->fd_queue) >=
646 schedule->max_tasks)) {
647 SILC_LOG_WARNING(("Scheduler task limit reached: cannot add new task"));
652 ftask = silc_calloc(1, sizeof(*ftask));
653 if (silc_unlikely(!ftask)) {
658 SILC_LOG_DEBUG(("New fd task %p fd=%d", ftask, fd));
/* type 0 == fd task; new fd tasks listen for reading by default. */
660 ftask->header.type = 0;
661 ftask->header.callback = callback;
662 ftask->header.context = context;
663 ftask->header.valid = TRUE;
664 ftask->events = SILC_TASK_READ;
667 /* Add task and schedule it */
668 if (!silc_hash_table_add(schedule->fd_queue, SILC_32_TO_PTR(fd), ftask)) {
/* Platform refused the fd: roll back the hash table entry. */
673 if (!schedule_ops.schedule_fd(schedule, schedule->internal,
674 ftask, ftask->events)) {
675 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(fd));
680 task = (SilcTask)ftask;
682 /* Call notify callback */
683 if (schedule->notify)
684 schedule->notify(schedule, TRUE, task, TRUE, ftask->fd,
685 SILC_TASK_READ, 0, 0, schedule->notify_context);
687 } else if (silc_unlikely(type == SILC_TASK_SIGNAL)) {
688 SILC_SCHEDULE_UNLOCK(schedule);
689 schedule_ops.signal_register(schedule, schedule->internal, fd,
695 SILC_SCHEDULE_UNLOCK(schedule);
698 /* On symbian we wakeup scheduler immediately after adding timeout task
699 in case the task is added outside the scheduler loop (in some active
701 if (task && task->type == 1)
702 silc_schedule_wakeup(schedule);
703 #endif /* SILC_SYMBIAN */
708 /* Invalidates task */
710 SilcBool silc_schedule_task_del(SilcSchedule schedule, SilcTask task)
/* Marks a task (or SILC_ALL_TASKS) invalid; actual removal happens later
   in the dispatch paths via silc_schedule_task_remove.  The valid-flag
   clearing lines are elided from this view. */
712 if (silc_unlikely(task == SILC_ALL_TASKS)) {
713 SilcHashTableList htl;
715 SILC_LOG_DEBUG(("Unregister all tasks"));
717 SILC_SCHEDULE_LOCK(schedule);
719 /* Delete from fd queue */
720 silc_hash_table_list(schedule->fd_queue, &htl);
721 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
724 /* Call notify callback */
725 if (schedule->notify)
726 schedule->notify(schedule, FALSE, task, TRUE,
727 ((SilcTaskFd)task)->fd, 0, 0, 0,
728 schedule->notify_context);
730 silc_hash_table_list_reset(&htl);
732 /* Delete from timeout queue */
733 silc_list_start(schedule->timeout_queue);
734 while ((task = (SilcTask)silc_list_get(schedule->timeout_queue))) {
737 /* Call notify callback */
738 if (schedule->notify)
739 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
740 schedule->notify_context);
743 SILC_SCHEDULE_UNLOCK(schedule);
/* Single-task case: !task->type distinguishes fd (0) from timeout (1)
   in the notify call. */
747 SILC_LOG_DEBUG(("Unregistering task %p", task));
748 SILC_SCHEDULE_LOCK(schedule);
751 /* Call notify callback */
752 if (schedule->notify)
753 schedule->notify(schedule, FALSE, task, !task->type, 0, 0, 0, 0,
754 schedule->notify_context);
755 SILC_SCHEDULE_UNLOCK(schedule);
760 /* Invalidate task by fd */
762 SilcBool silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
/* Invalidates the unique fd task keyed by `fd'; falls through to the
   platform signal-unregister when no fd task exists (signals share this
   entry point).  The valid-flag clearing line is elided from view. */
764 SilcTask task = NULL;
765 SilcBool ret = FALSE;
767 SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
769 SILC_SCHEDULE_LOCK(schedule);
771 /* fd is unique, so there is only one task with this fd in the table */
772 if (silc_likely(silc_hash_table_find(schedule->fd_queue,
773 SILC_32_TO_PTR(fd), NULL,
775 SILC_LOG_DEBUG(("Deleting task %p", task));
778 /* Call notify callback */
779 if (schedule->notify)
780 schedule->notify(schedule, FALSE, task, TRUE, fd, 0, 0, 0,
781 schedule->notify_context);
785 SILC_SCHEDULE_UNLOCK(schedule);
787 /* If it is signal, remove it */
788 if (silc_unlikely(!task)) {
789 schedule_ops.signal_unregister(schedule, schedule->internal, fd);
796 /* Invalidate task by task callback. */
798 SilcBool silc_schedule_task_del_by_callback(SilcSchedule schedule,
799 SilcTaskCallback callback)
/* Invalidates every task (fd and timeout) whose callback matches.
   Valid-flag clearing and `ret' updates are on lines elided from view. */
802 SilcHashTableList htl;
804 SilcBool ret = FALSE;
806 SILC_LOG_DEBUG(("Unregister task by callback"));
808 SILC_SCHEDULE_LOCK(schedule);
810 /* Delete from fd queue */
811 silc_hash_table_list(schedule->fd_queue, &htl);
812 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
813 if (task->callback == callback) {
816 /* Call notify callback */
817 if (schedule->notify)
818 schedule->notify(schedule, FALSE, task, TRUE,
819 ((SilcTaskFd)task)->fd, 0, 0, 0,
820 schedule->notify_context);
824 silc_hash_table_list_reset(&htl);
826 /* Delete from timeout queue */
827 list = schedule->timeout_queue;
828 silc_list_start(list);
829 while ((task = (SilcTask)silc_list_get(list))) {
830 if (task->callback == callback) {
833 /* Call notify callback */
834 if (schedule->notify)
835 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
836 schedule->notify_context);
841 SILC_SCHEDULE_UNLOCK(schedule);
846 /* Invalidate task by context. */
848 SilcBool silc_schedule_task_del_by_context(SilcSchedule schedule,
/* Invalidates every task (fd and timeout) whose context matches --
   mirror image of del_by_callback.  The `context' parameter line and
   valid-flag clearing are elided from this view. */
852 SilcHashTableList htl;
854 SilcBool ret = FALSE;
856 SILC_LOG_DEBUG(("Unregister task by context"));
858 SILC_SCHEDULE_LOCK(schedule);
860 /* Delete from fd queue */
861 silc_hash_table_list(schedule->fd_queue, &htl);
862 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
863 if (task->context == context) {
866 /* Call notify callback */
867 if (schedule->notify)
868 schedule->notify(schedule, FALSE, task, TRUE,
869 ((SilcTaskFd)task)->fd, 0, 0, 0,
870 schedule->notify_context);
874 silc_hash_table_list_reset(&htl);
876 /* Delete from timeout queue */
877 list = schedule->timeout_queue;
878 silc_list_start(list);
879 while ((task = (SilcTask)silc_list_get(list))) {
880 if (task->context == context) {
883 /* Call notify callback */
884 if (schedule->notify)
885 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
886 schedule->notify_context);
891 SILC_SCHEDULE_UNLOCK(schedule);
896 /* Invalidate task by all */
898 SilcBool silc_schedule_task_del_by_all(SilcSchedule schedule, int fd,
899 SilcTaskCallback callback,
/* Invalidates timeout tasks matching both callback and context; for fd
   tasks the fd alone is decisive and the call delegates to
   del_by_fd (the `fd != -1'-style guard line is elided from view). */
904 SilcBool ret = FALSE;
906 SILC_LOG_DEBUG(("Unregister task by fd, callback and context"));
908 /* For fd task, callback and context is irrelevant as fd is unique */
910 return silc_schedule_task_del_by_fd(schedule, fd);
912 SILC_SCHEDULE_LOCK(schedule);
914 /* Delete from timeout queue */
915 list = schedule->timeout_queue;
916 silc_list_start(list);
917 while ((task = (SilcTask)silc_list_get(list))) {
918 if (task->callback == callback && task->context == context) {
921 /* Call notify callback */
922 if (schedule->notify)
923 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
924 schedule->notify_context);
929 SILC_SCHEDULE_UNLOCK(schedule);
934 /* Sets a file descriptor to be listened by scheduler. One can call this
935 directly if wanted. This can be called multiple times for one file
936 descriptor to set different iomasks. */
938 SilcBool silc_schedule_set_listen_fd(SilcSchedule schedule, SilcUInt32 fd,
939 SilcTaskEvent mask, SilcBool send_events)
/* Re-arms an existing fd task with a new event mask.  When `send_events'
   is set with a non-zero mask, the events are synthesized immediately by
   forcing revents and dispatching -- used to kick-start handlers. */
943 if (silc_unlikely(!schedule->valid))
946 SILC_SCHEDULE_LOCK(schedule);
948 if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
949 NULL, (void *)&task)) {
950 if (!schedule_ops.schedule_fd(schedule, schedule->internal, task, mask)) {
951 SILC_SCHEDULE_UNLOCK(schedule);
/* Synthesize the requested events right now instead of waiting for
   the platform select to report them. */
955 if (silc_unlikely(send_events) && mask) {
956 task->revents = mask;
957 silc_schedule_dispatch_fd(schedule);
960 /* Call notify callback */
961 if (schedule->notify)
962 schedule->notify(schedule, TRUE, (SilcTask)task,
963 TRUE, task->fd, mask, 0, 0,
964 schedule->notify_context);
967 SILC_SCHEDULE_UNLOCK(schedule);
972 /* Returns the file descriptor's current requested event mask. */
974 SilcTaskEvent silc_schedule_get_fd_events(SilcSchedule schedule,
/* Looks up the fd task and returns its `events' mask; 0 when the fd is
   unknown or the scheduler is no longer valid.  (The fd parameter line
   is elided from this view.) */
978 SilcTaskEvent event = 0;
980 if (silc_unlikely(!schedule->valid))
983 SILC_SCHEDULE_LOCK(schedule);
984 if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
985 NULL, (void *)&task))
986 event = task->events;
987 SILC_SCHEDULE_UNLOCK(schedule);
992 /* Removes a file descriptor from listen list. */
994 void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
/* Convenience wrapper: a zero mask with no synthesized events stops all
   listening on `fd' without deleting the task. */
996 silc_schedule_set_listen_fd(schedule, fd, 0, FALSE);