5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 1998 - 2007 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
23 /************************** Types and definitions ***************************/
25 /* Platform specific implementation */
/* schedule_ops is defined by the per-platform backend (select/poll/epoll,
   symbian, win32 ...); this file drives it through the ops vtable only. */
26 extern const SilcScheduleOps schedule_ops;
/* Internal helpers defined below; task_remove must be called with the
   scheduler lock held (see its definition). */
28 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task);
29 static void silc_schedule_dispatch_fd(SilcSchedule schedule);
30 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
31 SilcBool dispatch_all);
34 /************************ Static utility functions **************************/
36 /* Fd task hash table destructor */
38 static void silc_schedule_fd_destructor(void *key, void *context,
44 /* Executes file descriptor tasks. Invalid tasks are removed here. */

/* NOTE(review): this listing is elided (embedded line numbers jump);
   the opening brace and the declarations of `task' and `t' are not
   visible here.  Presumably `t' aliases the task header of `task' —
   TODO confirm against the full source.  Called with the scheduler
   lock held; the lock is dropped for the duration of the callbacks. */

46 static void silc_schedule_dispatch_fd(SilcSchedule schedule)
51 /* The dispatch list includes only valid tasks, and tasks that have
52 something to dispatch. Dispatching is atomic; no matter if another
53 thread invalidates a task when we unlock, we dispatch to completion. */
54 SILC_SCHEDULE_UNLOCK(schedule);
55 silc_list_start(schedule->fd_dispatch);
56 while ((task = silc_list_get(schedule->fd_dispatch))) {
59 /* Is the task ready for reading */
60 if (task->revents & SILC_TASK_READ)
61 t->callback(schedule, schedule->app_context, SILC_TASK_READ,
62 task->fd, t->context);
64 /* Is the task ready for writing */
/* The read callback may have invalidated the task, hence the extra
   t->valid check before the write dispatch (the read branch needs none
   because the dispatch list contained only valid tasks on entry). */
65 if (t->valid && task->revents & SILC_TASK_WRITE)
66 t->callback(schedule, schedule->app_context, SILC_TASK_WRITE,
67 task->fd, t->context);
69 SILC_SCHEDULE_LOCK(schedule);
71 /* Remove invalidated tasks */
/* Second pass, performed with the lock re-acquired. */
72 silc_list_start(schedule->fd_dispatch);
73 while ((task = silc_list_get(schedule->fd_dispatch)))
74 if (silc_unlikely(!task->header.valid))
75 silc_schedule_task_remove(schedule, (SilcTask)task);
78 /* Executes all tasks whose timeout has expired. The task is removed from
79 the task queue after the callback function has returned. Also, invalid
80 tasks are removed here. */

/* NOTE(review): elided listing — the opening brace, the declarations of
   `task', `t' and `count', the `do {' that matches the trailing
   `} while', and the `break'/`continue' statements are not visible.
   Must be called with the scheduler lock held; the lock is released
   around each callback invocation. */

82 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
83 SilcBool dispatch_all)
87 struct timeval curtime;
90 SILC_LOG_DEBUG(("Running timeout tasks"));
92 silc_gettimeofday(&curtime);
94 /* First task in the task queue has always the earliest timeout. */
95 silc_list_start(schedule->timeout_queue);
96 task = silc_list_get(schedule->timeout_queue);
97 if (silc_unlikely(!task))
102 /* Remove invalid task */
103 if (silc_unlikely(!t->valid)) {
104 silc_schedule_task_remove(schedule, t);
108 /* Execute the task if the timeout has expired */
/* When dispatch_all is TRUE even future timeouts are run (used by
   silc_schedule_uninit to drain the queue before teardown). */
109 if (silc_compare_timeval(&task->timeout, &curtime) > 0 && !dispatch_all)
113 SILC_SCHEDULE_UNLOCK(schedule);
114 t->callback(schedule, schedule->app_context, SILC_TASK_EXPIRE, 0,
116 SILC_SCHEDULE_LOCK(schedule);
118 /* Remove the expired task */
119 silc_schedule_task_remove(schedule, t);
121 /* Balance when we have lots of small timeouts */
/* Cap the number of timeouts run per call so fd tasks are not starved. */
122 if (silc_unlikely((++count) > 40))
124 } while (silc_likely((task = silc_list_get(schedule->timeout_queue))));
127 /* Calculates next timeout. This is the timeout value when at earliest some
128 of the timeout tasks expire. If this is in the past, they will be
131 static void silc_schedule_select_timeout(SilcSchedule schedule)

/* NOTE(review): elided listing — the opening brace, the declaration of
   `t', the `do {', and several `break'/`return'/`continue' statements
   are missing from view.  On exit, schedule->timeout holds the relative
   wait time for the platform select/poll and has_timeout is set; if the
   queue is empty has_timeout stays FALSE. */

134 SilcTaskTimeout task;
135 struct timeval curtime;
136 SilcBool dispatch = TRUE;
138 /* Get the current time */
139 silc_gettimeofday(&curtime);
140 schedule->has_timeout = FALSE;
142 /* First task in the task queue has always the earliest timeout. */
143 silc_list_start(schedule->timeout_queue);
144 task = silc_list_get(schedule->timeout_queue);
145 if (silc_unlikely(!task))
150 /* Remove invalid task */
151 if (silc_unlikely(!t->valid)) {
152 silc_schedule_task_remove(schedule, t);
156 /* If the timeout is in past, we will run the task and all other
157 timeout tasks from the past. */
/* dispatch is TRUE only on the first pass, so expired tasks are run at
   most once per call; the queue is then re-scanned from the start. */
158 if (silc_compare_timeval(&task->timeout, &curtime) <= 0 && dispatch) {
159 silc_schedule_dispatch_timeout(schedule, FALSE);
160 if (silc_unlikely(!schedule->valid))
163 /* Start selecting new timeout again after dispatch */
164 silc_list_start(schedule->timeout_queue);
169 /* Calculate the next timeout */
/* curtime is reused to hold the delta (task->timeout - now). */
170 curtime.tv_sec = task->timeout.tv_sec - curtime.tv_sec;
171 curtime.tv_usec = task->timeout.tv_usec - curtime.tv_usec;
172 if (curtime.tv_sec < 0)
175 /* We wouldn't want to go under zero, check for it. */
176 if (curtime.tv_usec < 0) {
178 if (curtime.tv_sec < 0)
180 curtime.tv_usec += 1000000L;
183 } while ((task = silc_list_get(schedule->timeout_queue)));
185 /* Save the timeout */
187 schedule->timeout = curtime;
188 schedule->has_timeout = TRUE;
189 SILC_LOG_DEBUG(("timeout: sec=%d, usec=%d", schedule->timeout.tv_sec,
190 schedule->timeout.tv_usec));
194 /* Removes task from the scheduler. This must be called with scheduler
197 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task)

/* NOTE(review): elided listing — the opening brace, the declarations of
   `fd' and `ftask', and the closing braces of both branches are not
   visible.  SILC_ALL_TASKS is a sentinel meaning "purge every queue". */

201 if (silc_unlikely(task == SILC_ALL_TASKS)) {
203 SilcHashTableList htl;
206 /* Delete from fd queue */
207 silc_hash_table_list(schedule->fd_queue, &htl);
208 while (silc_hash_table_get(&htl, (void *)&fd, (void *)&task))
209 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(fd));
210 silc_hash_table_list_reset(&htl);
212 /* Delete from timeout queue */
213 silc_list_start(schedule->timeout_queue);
214 while ((task = silc_list_get(schedule->timeout_queue))) {
215 silc_list_del(schedule->timeout_queue, task);
/* type == 1 marks a timeout task (see task_add); 0 marks an fd task. */
222 if (silc_likely(task->type == 1)) {
223 /* Delete from timeout queue */
224 silc_list_del(schedule->timeout_queue, task);
226 /* Put to free list */
/* Timeout task structs are recycled via free_tasks, not freed here;
   the GC task below trims the freelist. */
227 silc_list_add(schedule->free_tasks, task);
229 /* Delete from fd queue */
230 ftask = (SilcTaskFd)task;
231 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(ftask->fd));
235 /* Timeout freelist garbage collection */

/* Periodic task that frees surplus recycled timeout-task structs.  It
   keeps at least 10 entries, and never trims below the number of live
   timeout tasks.  Re-arms itself via silc_schedule_task_add_timeout
   (the interval arguments are elided from this listing). */

237 SILC_TASK_CALLBACK(silc_schedule_timeout_gc)
242 if (!schedule->valid)
245 SILC_LOG_DEBUG(("Timeout freelist garbage collection"));
247 SILC_SCHEDULE_LOCK(schedule);
/* Nothing worth collecting: freelist is already small. */
249 if (silc_list_count(schedule->free_tasks) <= 10) {
250 SILC_SCHEDULE_UNLOCK(schedule);
251 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* More live timeout tasks than free entries: freelist will be reused
   soon, so skip this round. */
255 if (silc_list_count(schedule->timeout_queue) >
256 silc_list_count(schedule->free_tasks)) {
257 SILC_SCHEDULE_UNLOCK(schedule);
258 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* Free roughly half the freelist, clamped so that (a) at least as many
   entries remain as there are queued timeouts and (b) at least 10
   entries always remain. */
263 c = silc_list_count(schedule->free_tasks) / 2;
264 if (c > silc_list_count(schedule->timeout_queue))
265 c = (silc_list_count(schedule->free_tasks) -
266 silc_list_count(schedule->timeout_queue));
267 if (silc_list_count(schedule->free_tasks) - c < 10)
268 c -= (10 - (silc_list_count(schedule->free_tasks) - c));
270 SILC_LOG_DEBUG(("Freeing %d unused tasks, leaving %d", c,
271 silc_list_count(schedule->free_tasks) - c));
273 silc_list_start(schedule->free_tasks);
274 while ((t = silc_list_get(schedule->free_tasks)) && c-- > 0) {
275 silc_list_del(schedule->free_tasks, t);
278 silc_list_start(schedule->free_tasks);
280 SILC_SCHEDULE_UNLOCK(schedule);
282 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
286 #ifdef SILC_DIST_INPLACE
287 /* Print schedule statistics to stdout */

/* Debug-build-only (SILC_DIST_INPLACE) dump of queue sizes and rough
   memory usage.  NOTE(review): the format specifiers mix %lu/%d with
   what look like size_t/SilcUInt32 expressions — on LP64 vs LLP64
   platforms some of these may be mismatched; verify against the types
   of silc_hash_table_count/silc_list_count before changing. */

289 void silc_schedule_stats(SilcSchedule schedule)
292 fprintf(stdout, "Schedule %p statistics:\n\n", schedule);
293 fprintf(stdout, "Num FD tasks : %lu (%lu bytes allocated)\n",
294 silc_hash_table_count(schedule->fd_queue),
295 sizeof(*ftask) * silc_hash_table_count(schedule->fd_queue));
296 fprintf(stdout, "Num Timeout tasks : %d (%d bytes allocated)\n",
297 silc_list_count(schedule->timeout_queue),
298 sizeof(struct SilcTaskTimeoutStruct) *
299 silc_list_count(schedule->timeout_queue),
300 fprintf(stdout, "Num Timeout freelist : %d (%d bytes allocated)\n",
301 silc_list_count(schedule->free_tasks),
302 sizeof(struct SilcTaskTimeoutStruct) *
303 silc_list_count(schedule->free_tasks));
305 #endif /* SILC_DIST_INPLACE */
307 /****************************** Public API **********************************/
309 /* Initializes the scheduler. This returns the scheduler context that
310 is given as arugment usually to all silc_schedule_* functions.
311 The `max_tasks' indicates the number of maximum tasks that the
312 scheduler can handle. The `app_context' is application specific
313 context that is delivered to task callbacks. */

/* NOTE(review): elided listing — a `stack' parameter/declaration, NULL
   checks after the allocations, the fd_queue assignment target on the
   hash-table alloc, the GC re-arm interval and the final
   `return schedule;' are not visible here.  Returns NULL on allocation
   or platform-init failure (partial cleanup paths visible below). */

315 SilcSchedule silc_schedule_init(int max_tasks, void *app_context,
318 SilcSchedule schedule;
320 stack = silc_stack_alloc(0, stack);
324 /* Allocate scheduler from the stack */
325 schedule = silc_scalloc(stack, 1, sizeof(*schedule));
329 SILC_LOG_DEBUG(("Initializing scheduler %p", schedule));
331 /* Allocate Fd task hash table dynamically */
333 silc_hash_table_alloc(NULL, 0, silc_hash_uint, NULL, NULL, NULL,
334 silc_schedule_fd_destructor, NULL, TRUE);
335 if (!schedule->fd_queue) {
336 silc_stack_free(stack);
340 silc_list_init(schedule->timeout_queue, struct SilcTaskStruct, next);
341 silc_list_init(schedule->free_tasks, struct SilcTaskStruct, next);
343 schedule->stack = stack;
344 schedule->app_context = app_context;
345 schedule->valid = TRUE;
346 schedule->max_tasks = max_tasks;
348 /* Allocate scheduler lock */
349 silc_mutex_alloc(&schedule->lock);
351 /* Initialize the platform specific scheduler. */
352 schedule->internal = schedule_ops.init(schedule, app_context);
353 if (!schedule->internal) {
/* Unwind everything allocated so far on platform-init failure. */
354 silc_hash_table_free(schedule->fd_queue);
355 silc_mutex_free(schedule->lock);
356 silc_stack_free(stack);
360 /* Timeout freelist garbage collection */
361 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
367 /* Uninitializes the schedule. This is called when the program is ready
368 to end. This removes all tasks and task queues. Returns FALSE if the
369 scheduler could not be uninitialized. This happens when the scheduler
370 is still valid and silc_schedule_stop has not been called. */

/* NOTE(review): elided listing — the declaration of `task', the FALSE
   return after the validity check, the silc_free of each freelist
   entry, and the final `return TRUE;' are not visible here. */

372 SilcBool silc_schedule_uninit(SilcSchedule schedule)
376 SILC_LOG_DEBUG(("Uninitializing scheduler %p", schedule));
/* Refuse to tear down a running scheduler; caller must stop it first. */
378 if (schedule->valid == TRUE)
381 /* Dispatch all timeouts before going away */
382 SILC_SCHEDULE_LOCK(schedule);
383 silc_schedule_dispatch_timeout(schedule, TRUE);
384 SILC_SCHEDULE_UNLOCK(schedule);
386 /* Deliver signals before going away */
387 if (schedule->signal_tasks) {
388 schedule_ops.signals_call(schedule, schedule->internal);
389 schedule->signal_tasks = FALSE;
392 /* Unregister all tasks */
393 silc_schedule_task_del(schedule, SILC_ALL_TASKS);
394 silc_schedule_task_remove(schedule, SILC_ALL_TASKS);
396 /* Delete timeout task freelist */
397 silc_list_start(schedule->free_tasks);
398 while ((task = silc_list_get(schedule->free_tasks)))
401 /* Unregister all task queues */
402 silc_hash_table_free(schedule->fd_queue);
404 /* Uninit the platform specific scheduler. */
405 schedule_ops.uninit(schedule, schedule->internal);
407 silc_mutex_free(schedule->lock);
408 silc_stack_free(schedule->stack);
413 /* Stops the schedule even if it is not supposed to be stopped yet.
414 After calling this, one should call silc_schedule_uninit (after the
415 silc_schedule has returned). */
417 void silc_schedule_stop(SilcSchedule schedule)
419 SILC_LOG_DEBUG(("Stopping scheduler"));
420 SILC_SCHEDULE_LOCK(schedule);
421 schedule->valid = FALSE;
422 SILC_SCHEDULE_UNLOCK(schedule);
425 /* Runs the scheduler once and then returns. Must be called locked. */

/* NOTE(review): elided listing — the opening brace, the declaration of
   `ret', the `do {' matching the trailing `} while', the
   timeout.tv_sec assignment in the timeout_usecs branch, the error
   branch for EINTR/retry, and the return statements are not visible.
   Runs one iteration (or loops forever when timeout_usecs == -1). */

427 static SilcBool silc_schedule_iterate(SilcSchedule schedule, int timeout_usecs)
429 struct timeval timeout;
433 SILC_LOG_DEBUG(("In scheduler loop"));
435 /* Deliver signals if any has been set to be called */
436 if (silc_unlikely(schedule->signal_tasks)) {
437 SILC_SCHEDULE_UNLOCK(schedule);
438 schedule_ops.signals_call(schedule, schedule->internal);
439 schedule->signal_tasks = FALSE;
440 SILC_SCHEDULE_LOCK(schedule);
443 /* Check if scheduler is valid */
444 if (silc_unlikely(schedule->valid == FALSE)) {
445 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
449 /* Calculate next timeout for silc_select(). This is the timeout value
450 when at earliest some of the timeout tasks expire. This may dispatch
451 already expired timeouts. */
452 silc_schedule_select_timeout(schedule);
454 /* Check if scheduler is valid */
/* Re-checked because select_timeout may have run callbacks that
   stopped the scheduler. */
455 if (silc_unlikely(schedule->valid == FALSE)) {
456 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
460 if (timeout_usecs >= 0) {
/* Caller-supplied timeout overrides the computed task timeout. */
462 timeout.tv_usec = timeout_usecs;
463 schedule->timeout = timeout;
464 schedule->has_timeout = TRUE;
467 /* This is the main silc_select(). The program blocks here until some
468 of the selected file descriptors change status or the selected
470 SILC_LOG_DEBUG(("Select"));
471 ret = schedule_ops.schedule(schedule, schedule->internal);
473 if (silc_likely(ret == 0)) {
475 SILC_LOG_DEBUG(("Running timeout tasks"));
476 if (silc_likely(silc_list_count(schedule->timeout_queue)))
477 silc_schedule_dispatch_timeout(schedule, FALSE);
480 } else if (silc_likely(ret > 0)) {
481 /* There is some data available now */
482 SILC_LOG_DEBUG(("Running fd tasks"));
483 silc_schedule_dispatch_fd(schedule);
485 /* If timeout was very short, dispatch also timeout tasks */
486 if (schedule->has_timeout && schedule->timeout.tv_sec == 0 &&
487 schedule->timeout.tv_usec < 50000)
488 silc_schedule_dispatch_timeout(schedule, FALSE);
492 /* Error or special case handling */
498 SILC_LOG_ERROR(("Error in select()/poll(): %s", strerror(errno)));
501 } while (timeout_usecs == -1);
506 /* Runs the scheduler once and then returns. */
508 SilcBool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
511 SILC_SCHEDULE_LOCK(schedule);
512 ret = silc_schedule_iterate(schedule, timeout_usecs);
513 SILC_SCHEDULE_UNLOCK(schedule);
517 /* Runs the scheduler and blocks here. When this returns the scheduler

/* NOTE(review): elided listing — braces and possibly an argument
   sanity check are not visible.  Blocks until silc_schedule_stop is
   called (timeout_usecs == -1 makes iterate loop forever). */

521 void silc_schedule(SilcSchedule schedule)
523 SILC_LOG_DEBUG(("Running scheduler"));
525 /* Start the scheduler loop */
526 SILC_SCHEDULE_LOCK(schedule);
527 silc_schedule_iterate(schedule, -1);
528 SILC_SCHEDULE_UNLOCK(schedule);
530 #endif /* !SILC_SYMBIAN */
532 /* Wakes up the scheduler. This is used only in multi-threaded
533 environments where threads may add new tasks or remove old tasks
534 from task queues. This is called to wake up the scheduler in the
535 main thread so that it detects the changes in the task queues.
536 If threads support is not compiled in this function has no effect.
537 Implementation of this function is platform specific. */

/* NOTE(review): elided listing — the body is presumably wrapped in
   #ifdef SILC_THREADS (per the doc comment's "no effect" claim);
   confirm against the full source before editing. */

539 void silc_schedule_wakeup(SilcSchedule schedule)
542 SILC_LOG_DEBUG(("Wakeup scheduler"));
543 SILC_SCHEDULE_LOCK(schedule);
544 schedule_ops.wakeup(schedule, schedule->internal);
545 SILC_SCHEDULE_UNLOCK(schedule);
549 /* Returns the application specific context that was saved into the
550 scheduler in silc_schedule_init function. The context is also
551 returned to application in task callback functions, but this function
552 may be used to get it as well if needed. */
554 void *silc_schedule_get_context(SilcSchedule schedule)
556 return schedule->app_context;
559 /* Return the stack of the scheduler */
561 SilcStack silc_schedule_get_stack(SilcSchedule schedule)
563 return schedule->stack;
566 /* Set notify callback */
568 void silc_schedule_set_notify(SilcSchedule schedule,
569 SilcTaskNotifyCb notify, void *context)
571 schedule->notify = notify;
572 schedule->notify_context = context;
575 /* Add new task to the scheduler */

/* NOTE(review): heavily elided listing — the full parameter list (a
   task `type' parameter is referenced), declarations of `list'/`ftask',
   several error-path `goto'/unlock statements, NULL checks and the
   final `return task;' are not visible.  Returns the new SilcTask or
   NULL on failure.  Timeout tasks are recycled from free_tasks and
   kept sorted by expiry; fd tasks are keyed by fd in a hash table. */

577 SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
578 SilcTaskCallback callback, void *context,
579 long seconds, long useconds,
582 SilcTask task = NULL;
584 if (silc_unlikely(!schedule->valid))
587 SILC_SCHEDULE_LOCK(schedule);
589 if (silc_likely(type == SILC_TASK_TIMEOUT)) {
590 SilcTaskTimeout tmp, prev, ttask;
/* Reuse a recycled struct from the freelist when available. */
593 silc_list_start(schedule->free_tasks);
594 ttask = silc_list_get(schedule->free_tasks);
595 if (silc_unlikely(!ttask)) {
596 ttask = silc_calloc(1, sizeof(*ttask));
597 if (silc_unlikely(!ttask))
600 silc_list_del(schedule->free_tasks, ttask);
602 ttask->header.type = 1;
603 ttask->header.callback = callback;
604 ttask->header.context = context;
605 ttask->header.valid = TRUE;
/* Compute absolute expiry = now + (seconds, useconds), normalizing
   the microsecond overflow into seconds. */
608 silc_gettimeofday(&ttask->timeout);
609 if ((seconds + useconds) > 0) {
610 ttask->timeout.tv_sec += seconds + (useconds / 1000000L);
611 ttask->timeout.tv_usec += (useconds % 1000000L);
612 if (ttask->timeout.tv_usec >= 1000000L) {
613 ttask->timeout.tv_sec += 1;
614 ttask->timeout.tv_usec -= 1000000L;
618 SILC_LOG_DEBUG(("New timeout task %p: sec=%d, usec=%d", ttask,
621 /* Add task to correct spot so that the first task in the list has
622 the earliest timeout. */
623 list = schedule->timeout_queue;
624 silc_list_start(list);
626 while ((tmp = silc_list_get(list)) != SILC_LIST_END) {
627 /* If we have shorter timeout, we have found our spot */
628 if (silc_compare_timeval(&ttask->timeout, &tmp->timeout) < 0) {
629 silc_list_insert(schedule->timeout_queue, prev, ttask);
/* No earlier spot found: append at the tail. */
635 silc_list_add(schedule->timeout_queue, ttask);
637 task = (SilcTask)ttask;
639 /* Call notify callback */
640 if (schedule->notify)
641 schedule->notify(schedule, TRUE, task, FALSE, 0, 0, seconds, useconds,
642 schedule->notify_context);
644 } else if (silc_likely(type == SILC_TASK_FD)) {
647 /* Check if fd is already added */
648 if (silc_unlikely(silc_hash_table_find(schedule->fd_queue,
650 NULL, (void *)&task))) {
654 /* Remove invalid task. We must have unique fd key to hash table. */
655 silc_schedule_task_remove(schedule, task);
658 /* Check max tasks */
659 if (silc_unlikely(schedule->max_tasks > 0 &&
660 silc_hash_table_count(schedule->fd_queue) >=
661 schedule->max_tasks)) {
662 SILC_LOG_WARNING(("Scheduler task limit reached: cannot add new task"));
667 ftask = silc_calloc(1, sizeof(*ftask));
668 if (silc_unlikely(!ftask)) {
673 SILC_LOG_DEBUG(("New fd task %p fd=%d", ftask, fd));
675 ftask->header.type = 0;
676 ftask->header.callback = callback;
677 ftask->header.context = context;
678 ftask->header.valid = TRUE;
/* New fd tasks listen for reads by default; use
   silc_schedule_set_listen_fd to change the mask. */
679 ftask->events = SILC_TASK_READ;
682 /* Add task and schedule it */
683 if (!silc_hash_table_add(schedule->fd_queue, SILC_32_TO_PTR(fd), ftask)) {
688 if (!schedule_ops.schedule_fd(schedule, schedule->internal,
689 ftask, ftask->events)) {
690 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(fd));
695 task = (SilcTask)ftask;
697 /* Call notify callback */
698 if (schedule->notify)
699 schedule->notify(schedule, TRUE, task, TRUE, ftask->fd,
700 SILC_TASK_READ, 0, 0, schedule->notify_context);
702 } else if (silc_unlikely(type == SILC_TASK_SIGNAL)) {
703 SILC_SCHEDULE_UNLOCK(schedule);
704 schedule_ops.signal_register(schedule, schedule->internal, fd,
710 SILC_SCHEDULE_UNLOCK(schedule);
713 /* On symbian we wakeup scheduler immediately after adding timeout task
714 in case the task is added outside the scheduler loop (in some active
716 if (task && task->type == 1)
717 silc_schedule_wakeup(schedule);
718 #endif /* SILC_SYMBIAN */
723 /* Invalidates task */

/* NOTE(review): elided listing — the statements that actually clear
   each task's valid flag, closing braces and the TRUE returns are not
   visible.  Invalidation is lazy: tasks are only marked invalid here
   and physically removed later by silc_schedule_task_remove. */

725 SilcBool silc_schedule_task_del(SilcSchedule schedule, SilcTask task)
727 if (silc_unlikely(task == SILC_ALL_TASKS)) {
728 SilcHashTableList htl;
730 SILC_LOG_DEBUG(("Unregister all tasks"));
732 SILC_SCHEDULE_LOCK(schedule);
734 /* Delete from fd queue */
735 silc_hash_table_list(schedule->fd_queue, &htl);
736 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
739 /* Call notify callback */
740 if (schedule->notify)
741 schedule->notify(schedule, FALSE, task, TRUE,
742 ((SilcTaskFd)task)->fd, 0, 0, 0,
743 schedule->notify_context);
745 silc_hash_table_list_reset(&htl);
747 /* Delete from timeout queue */
748 silc_list_start(schedule->timeout_queue);
749 while ((task = (SilcTask)silc_list_get(schedule->timeout_queue))) {
752 /* Call notify callback */
753 if (schedule->notify)
754 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
755 schedule->notify_context);
758 SILC_SCHEDULE_UNLOCK(schedule);
/* Single-task path: mark one task invalid. */
762 SILC_LOG_DEBUG(("Unregistering task %p", task));
763 SILC_SCHEDULE_LOCK(schedule);
766 /* Call notify callback */
767 if (schedule->notify)
768 schedule->notify(schedule, FALSE, task, !task->type, 0, 0, 0, 0,
769 schedule->notify_context);
770 SILC_SCHEDULE_UNLOCK(schedule);
775 /* Invalidate task by fd */

/* NOTE(review): elided listing — the invalidation statement, `ret'
   updates and the final `return ret;' are not visible.  If no fd task
   matches, the fd is assumed to be a registered signal and is
   unregistered via the platform ops instead. */

777 SilcBool silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
779 SilcTask task = NULL;
780 SilcBool ret = FALSE;
782 SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
784 SILC_SCHEDULE_LOCK(schedule);
786 /* fd is unique, so there is only one task with this fd in the table */
787 if (silc_likely(silc_hash_table_find(schedule->fd_queue,
788 SILC_32_TO_PTR(fd), NULL,
790 SILC_LOG_DEBUG(("Deleting task %p", task));
793 /* Call notify callback */
794 if (schedule->notify)
795 schedule->notify(schedule, FALSE, task, TRUE, fd, 0, 0, 0,
796 schedule->notify_context);
800 SILC_SCHEDULE_UNLOCK(schedule);
802 /* If it is signal, remove it */
803 if (silc_unlikely(!task)) {
804 schedule_ops.signal_unregister(schedule, schedule->internal, fd);
811 /* Invalidate task by task callback. */

/* Invalidates every fd and timeout task whose callback matches.
   NOTE(review): elided listing — the declarations of `task'/`list',
   the valid=FALSE assignments, `ret = TRUE' updates and the final
   return are not visible. */

813 SilcBool silc_schedule_task_del_by_callback(SilcSchedule schedule,
814 SilcTaskCallback callback)
817 SilcHashTableList htl;
819 SilcBool ret = FALSE;
821 SILC_LOG_DEBUG(("Unregister task by callback"));
823 SILC_SCHEDULE_LOCK(schedule);
825 /* Delete from fd queue */
826 silc_hash_table_list(schedule->fd_queue, &htl);
827 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
828 if (task->callback == callback) {
831 /* Call notify callback */
832 if (schedule->notify)
833 schedule->notify(schedule, FALSE, task, TRUE,
834 ((SilcTaskFd)task)->fd, 0, 0, 0,
835 schedule->notify_context);
839 silc_hash_table_list_reset(&htl);
841 /* Delete from timeout queue */
842 list = schedule->timeout_queue;
843 silc_list_start(list);
844 while ((task = (SilcTask)silc_list_get(list))) {
845 if (task->callback == callback) {
848 /* Call notify callback */
849 if (schedule->notify)
850 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
851 schedule->notify_context);
856 SILC_SCHEDULE_UNLOCK(schedule);
861 /* Invalidate task by context. */

/* Mirror of del_by_callback, matching on the task context pointer
   instead.  NOTE(review): same elisions as del_by_callback — the
   `context' parameter, invalidation statements and return are not
   visible in this listing. */

863 SilcBool silc_schedule_task_del_by_context(SilcSchedule schedule,
867 SilcHashTableList htl;
869 SilcBool ret = FALSE;
871 SILC_LOG_DEBUG(("Unregister task by context"));
873 SILC_SCHEDULE_LOCK(schedule);
875 /* Delete from fd queue */
876 silc_hash_table_list(schedule->fd_queue, &htl);
877 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
878 if (task->context == context) {
881 /* Call notify callback */
882 if (schedule->notify)
883 schedule->notify(schedule, FALSE, task, TRUE,
884 ((SilcTaskFd)task)->fd, 0, 0, 0,
885 schedule->notify_context);
889 silc_hash_table_list_reset(&htl);
891 /* Delete from timeout queue */
892 list = schedule->timeout_queue;
893 silc_list_start(list);
894 while ((task = (SilcTask)silc_list_get(list))) {
895 if (task->context == context) {
898 /* Call notify callback */
899 if (schedule->notify)
900 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
901 schedule->notify_context);
906 SILC_SCHEDULE_UNLOCK(schedule);
911 /* Invalidate task by all */

/* Invalidates timeout tasks matching both callback and context; for
   fd tasks (fd != 0, presumably — the guard line is elided) it
   delegates to del_by_fd since fd alone is unique.  NOTE(review):
   elided listing — the fd guard, `context' parameter, invalidation
   statements and final return are not visible. */

913 SilcBool silc_schedule_task_del_by_all(SilcSchedule schedule, int fd,
914 SilcTaskCallback callback,
919 SilcBool ret = FALSE;
921 SILC_LOG_DEBUG(("Unregister task by fd, callback and context"));
923 /* For fd task, callback and context is irrelevant as fd is unique */
925 return silc_schedule_task_del_by_fd(schedule, fd);
927 SILC_SCHEDULE_LOCK(schedule);
929 /* Delete from timeout queue */
930 list = schedule->timeout_queue;
931 silc_list_start(list);
932 while ((task = (SilcTask)silc_list_get(list))) {
933 if (task->callback == callback && task->context == context) {
936 /* Call notify callback */
937 if (schedule->notify)
938 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
939 schedule->notify_context);
944 SILC_SCHEDULE_UNLOCK(schedule);
949 /* Sets a file descriptor to be listened by scheduler. One can call this
950 directly if wanted. This can be called multiple times for one file
951 descriptor to set different iomasks. */

/* NOTE(review): elided listing — the declaration of `task', the FALSE
   returns and the TRUE return are not visible.  A mask of 0 disables
   listening (see silc_schedule_unset_listen_fd). */

953 SilcBool silc_schedule_set_listen_fd(SilcSchedule schedule, SilcUInt32 fd,
954 SilcTaskEvent mask, SilcBool send_events)
958 if (silc_unlikely(!schedule->valid))
961 SILC_SCHEDULE_LOCK(schedule);
963 if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
964 NULL, (void *)&task)) {
965 if (!schedule_ops.schedule_fd(schedule, schedule->internal, task, mask)) {
966 SILC_SCHEDULE_UNLOCK(schedule);
/* Optionally synthesize the events immediately instead of waiting
   for the platform select to report them. */
970 if (silc_unlikely(send_events) && mask) {
971 task->revents = mask;
972 silc_schedule_dispatch_fd(schedule);
975 /* Call notify callback */
976 if (schedule->notify)
977 schedule->notify(schedule, TRUE, (SilcTask)task,
978 TRUE, task->fd, mask, 0, 0,
979 schedule->notify_context);
982 SILC_SCHEDULE_UNLOCK(schedule);
987 /* Returns the file descriptor's current requested event mask. */

/* Returns 0 when the scheduler is invalid or the fd is unknown.
   NOTE(review): elided listing — the fd parameter, the declaration of
   `task', the early return and the final `return event;' are not
   visible here. */

989 SilcTaskEvent silc_schedule_get_fd_events(SilcSchedule schedule,
993 SilcTaskEvent event = 0;
995 if (silc_unlikely(!schedule->valid))
998 SILC_SCHEDULE_LOCK(schedule);
999 if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
1000 NULL, (void *)&task))
1001 event = task->events;
1002 SILC_SCHEDULE_UNLOCK(schedule);
1007 /* Removes a file descriptor from listen list. */
1009 void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
1011 silc_schedule_set_listen_fd(schedule, fd, 0, FALSE);