5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 1998 - 2007 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
23 /************************** Types and definitions ***************************/
25 /* Platform specific implementation */
26 extern const SilcScheduleOps schedule_ops;
28 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task);
29 static void silc_schedule_dispatch_fd(SilcSchedule schedule);
30 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
31 SilcBool dispatch_all);
34 /************************ Static utility functions **************************/
36 /* Fd task hash table destructor */
/* NOTE(review): callback registered with the fd_queue hash table (see
   silc_schedule_init); the remaining parameters and the body are not
   visible in this chunk — presumably frees the SilcTaskFd stored as the
   hash table context. Confirm against the full source. */
38 static void silc_schedule_fd_destructor(void *key, void *context,
44 /* Executes file descriptor tasks. Invalid tasks are removed here. */
/* Must be entered with the scheduler lock held; the lock is released
   around the user callbacks and re-acquired before the cleanup pass. */
46 static void silc_schedule_dispatch_fd(SilcSchedule schedule)
51   /* The dispatch list includes only valid tasks, and tasks that have
52   something to dispatch. Dispatching is atomic; no matter if another
53   thread invalidates a task when we unlock, we dispatch to completion. */
54   SILC_SCHEDULE_UNLOCK(schedule);
55   silc_list_start(schedule->fd_dispatch);
56   while ((task = silc_list_get(schedule->fd_dispatch))) {
/* NOTE(review): 't' is used below but declared on lines not visible in
   this chunk — presumably the task header (&task->header). Confirm. */
59     /* Is the task ready for reading */
60     if (task->revents & SILC_TASK_READ)
61       t->callback(schedule, schedule->app_context, SILC_TASK_READ,
62       task->fd, t->context);
64     /* Is the task ready for writing */
/* The read callback may have invalidated the task; t->valid is
   re-checked before delivering the write event. */
65     if (t->valid && task->revents & SILC_TASK_WRITE)
66       t->callback(schedule, schedule->app_context, SILC_TASK_WRITE,
67       task->fd, t->context);
69   SILC_SCHEDULE_LOCK(schedule);
71   /* Remove invalidated tasks */
72   silc_list_start(schedule->fd_dispatch);
73   while ((task = silc_list_get(schedule->fd_dispatch)))
74     if (silc_unlikely(!task->header.valid))
75       silc_schedule_task_remove(schedule, (SilcTask)task);
78 /* Executes all tasks whose timeout has expired. The task is removed from
79 the task queue after the callback function has returned. Also, invalid
80 tasks are removed here. */
/* Called with the scheduler lock held. If `dispatch_all' is TRUE every
   queued timeout task is run regardless of its expiry time (used during
   uninit). NOTE(review): the do { loop head and the declarations of 't'
   and 'count' are on lines not visible in this chunk. */
82 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
83 SilcBool dispatch_all)
87   struct timeval curtime;
90   SILC_LOG_DEBUG(("Running timeout tasks"));
92   silc_gettimeofday(&curtime);
94   /* First task in the task queue has always the earliest timeout. */
95   silc_list_start(schedule->timeout_queue);
96   task = silc_list_get(schedule->timeout_queue);
97   if (silc_unlikely(!task))
102     /* Remove invalid task */
103     if (silc_unlikely(!t->valid)) {
104       silc_schedule_task_remove(schedule, t);
108     /* Execute the task if the timeout has expired */
/* Queue is sorted by expiry, so the first unexpired task ends the
   loop (unless dispatch_all forces execution). */
109     if (silc_compare_timeval(&task->timeout, &curtime) > 0 && !dispatch_all)
/* Lock dropped around the user callback, same pattern as fd dispatch. */
113     SILC_SCHEDULE_UNLOCK(schedule);
114     t->callback(schedule, schedule->app_context, SILC_TASK_EXPIRE, 0,
116     SILC_SCHEDULE_LOCK(schedule);
118     /* Remove the expired task */
119     silc_schedule_task_remove(schedule, t);
121     /* Balance when we have lots of small timeouts */
/* Caps one dispatch pass at ~40 tasks so fd tasks are not starved. */
122     if (silc_unlikely((++count) > 40))
124   } while (silc_likely((task = silc_list_get(schedule->timeout_queue))));
127 /* Calculates next timeout. This is the timeout value when at earliest some
128 of the timeout tasks expire. If this is in the past, they will be
/* Called with the scheduler lock held. Result is stored in
   schedule->timeout / schedule->has_timeout for the platform select. */
131 static void silc_schedule_select_timeout(SilcSchedule schedule)
134   SilcTaskTimeout task;
135   struct timeval curtime;
136   SilcBool dispatch = TRUE;
138   /* Get the current time */
139   silc_gettimeofday(&curtime);
140   schedule->has_timeout = FALSE;
142   /* First task in the task queue has always the earliest timeout. */
143   silc_list_start(schedule->timeout_queue);
144   task = silc_list_get(schedule->timeout_queue);
145   if (silc_unlikely(!task))
150     /* Remove invalid task */
151     if (silc_unlikely(!t->valid)) {
152       silc_schedule_task_remove(schedule, t);
156     /* If the timeout is in past, we will run the task and all other
157     timeout tasks from the past. */
158     if (silc_compare_timeval(&task->timeout, &curtime) <= 0 && dispatch) {
159       silc_schedule_dispatch_timeout(schedule, FALSE);
/* Dispatch may have stopped the scheduler; bail out if so. */
160       if (silc_unlikely(!schedule->valid))
163       /* Start selecting new timeout again after dispatch */
164       silc_list_start(schedule->timeout_queue);
169     /* Calculate the next timeout */
/* curtime is reused to hold the delta: time remaining until the
   earliest task expires. */
170     curtime.tv_sec = task->timeout.tv_sec - curtime.tv_sec;
171     curtime.tv_usec = task->timeout.tv_usec - curtime.tv_usec;
172     if (curtime.tv_sec < 0)
175     /* We wouldn't want to go under zero, check for it. */
/* Normalize a negative usec field by borrowing from tv_sec. */
176     if (curtime.tv_usec < 0) {
178       if (curtime.tv_sec < 0)
180       curtime.tv_usec += 1000000L;
183   } while ((task = silc_list_get(schedule->timeout_queue)));
185   /* Save the timeout */
187   schedule->timeout = curtime;
188   schedule->has_timeout = TRUE;
189   SILC_LOG_DEBUG(("timeout: sec=%d, usec=%d", schedule->timeout.tv_sec,
190   schedule->timeout.tv_usec));
194 /* Removes task from the scheduler. This must be called with scheduler
/* Handles three cases: SILC_ALL_TASKS (flush both queues), a timeout
   task (header.type == 1, recycled to the free list), and an fd task
   (deleted from the fd hash table, which triggers its destructor). */
197 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task)
201   if (silc_unlikely(task == SILC_ALL_TASKS)) {
203     SilcHashTableList htl;
206     /* Delete from fd queue */
207     silc_hash_table_list(schedule->fd_queue, &htl);
208     while (silc_hash_table_get(&htl, &fd, (void *)&task))
209       silc_hash_table_del(schedule->fd_queue, fd);
210     silc_hash_table_list_reset(&htl);
212     /* Delete from timeout queue */
213     silc_list_start(schedule->timeout_queue);
214     while ((task = silc_list_get(schedule->timeout_queue))) {
215       silc_list_del(schedule->timeout_queue, task);
/* type == 1 marks a timeout task (set in silc_schedule_task_add). */
222   if (silc_likely(task->type == 1)) {
223     /* Delete from timeout queue */
224     silc_list_del(schedule->timeout_queue, task);
226     /* Put to free list */
227     silc_list_add(schedule->free_tasks, task);
229     /* Delete from fd queue */
230     ftask = (SilcTaskFd)task;
231     silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(ftask->fd));
235 /* Timeout freelist garbage collection */
/* Periodic self-rescheduling task: frees surplus entries from the
   timeout-task free list. It keeps at least 10 entries cached and never
   shrinks the free list below the number of queued timeout tasks. */
237 SILC_TASK_CALLBACK(silc_schedule_timeout_gc)
242   if (!schedule->valid)
245   SILC_LOG_DEBUG(("Timeout freelist garbage collection"));
247   SILC_SCHEDULE_LOCK(schedule);
/* Small free list: nothing to collect, just reschedule ourselves. */
249   if (silc_list_count(schedule->free_tasks) <= 10) {
250     SILC_SCHEDULE_UNLOCK(schedule);
251     silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* More live timeout tasks than free entries: the cache is not
   oversized, reschedule without freeing. */
255   if (silc_list_count(schedule->timeout_queue) >
256   silc_list_count(schedule->free_tasks)) {
257     SILC_SCHEDULE_UNLOCK(schedule);
258     silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* Free half the free list, clamped so that at least as many entries as
   there are queued tasks — and at least 10 — remain cached. */
263   c = silc_list_count(schedule->free_tasks) / 2;
264   if (c > silc_list_count(schedule->timeout_queue))
265     c = (silc_list_count(schedule->free_tasks) -
266     silc_list_count(schedule->timeout_queue));
267   if (silc_list_count(schedule->free_tasks) - c < 10)
268     c -= (10 - (silc_list_count(schedule->free_tasks) - c));
270   SILC_LOG_DEBUG(("Freeing %d unused tasks, leaving %d", c,
271   silc_list_count(schedule->free_tasks) - c));
273   silc_list_start(schedule->free_tasks);
274   while ((t = silc_list_get(schedule->free_tasks)) && c-- > 0) {
275     silc_list_del(schedule->free_tasks, t);
278   silc_list_start(schedule->free_tasks);
280   SILC_SCHEDULE_UNLOCK(schedule);
282   silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
286 #ifdef SILC_DIST_INPLACE
287 /* Print schedule statistics to stdout */
/* Debug-build-only helper (SILC_DIST_INPLACE). NOTE(review): 'ftask' is
   used only for sizeof; its declaration is on lines not visible here. */
289 void silc_schedule_stats(SilcSchedule schedule)
292   fprintf(stdout, "Schedule %p statistics:\n\n", schedule);
293   fprintf(stdout, "Num FD tasks : %lu (%lu bytes allocated)\n",
294   silc_hash_table_count(schedule->fd_queue),
295   sizeof(*ftask) * silc_hash_table_count(schedule->fd_queue));
296   fprintf(stdout, "Num Timeout tasks : %d (%d bytes allocated)\n",
297   silc_list_count(schedule->timeout_queue),
298   sizeof(struct SilcTaskTimeoutStruct) *
299   silc_list_count(schedule->timeout_queue));
300   fprintf(stdout, "Num Timeout freelist : %d (%d bytes allocated)\n",
301   silc_list_count(schedule->free_tasks),
302   sizeof(struct SilcTaskTimeoutStruct) *
303   silc_list_count(schedule->free_tasks));
305 #endif /* SILC_DIST_INPLACE */
307 /****************************** Public API **********************************/
309 /* Initializes the scheduler. This returns the scheduler context that
310 is given as arugment usually to all silc_schedule_* functions.
311 The `max_tasks' indicates the number of maximum tasks that the
312 scheduler can handle. The `app_context' is application specific
313 context that is delivered to task callbacks. */
/* NOTE(review): the trailing parameter(s) of the signature and the
   NULL-return error paths are on lines not visible in this chunk;
   presumably a SilcStack parameter, given the silc_stack_alloc call
   below. Returns NULL on allocation failure. */
315 SilcSchedule silc_schedule_init(int max_tasks, void *app_context,
318   SilcSchedule schedule;
320   /* Initialize Tls, in case it hasn't been done yet */
321   silc_thread_tls_init();
/* Child stack derived from the caller-provided stack (0 = default size). */
323   stack = silc_stack_alloc(0, stack);
327   /* Allocate scheduler from the stack */
328   schedule = silc_scalloc(stack, 1, sizeof(*schedule));
332   SILC_LOG_DEBUG(("Initializing scheduler %p", schedule));
334   /* Allocate Fd task hash table dynamically */
336   silc_hash_table_alloc(NULL, 0, silc_hash_uint, NULL, NULL, NULL,
337   silc_schedule_fd_destructor, NULL, TRUE);
338   if (!schedule->fd_queue) {
339     silc_stack_free(stack);
343   silc_list_init(schedule->timeout_queue, struct SilcTaskStruct, next);
344   silc_list_init(schedule->free_tasks, struct SilcTaskStruct, next);
346   schedule->stack = stack;
347   schedule->app_context = app_context;
348   schedule->valid = TRUE;
349   schedule->max_tasks = max_tasks;
351   /* Allocate scheduler lock */
352   silc_mutex_alloc(&schedule->lock);
354   /* Initialize the platform specific scheduler. */
355   schedule->internal = schedule_ops.init(schedule, app_context);
356   if (!schedule->internal) {
/* Roll back everything allocated so far before failing. */
357     silc_hash_table_free(schedule->fd_queue);
358     silc_mutex_free(schedule->lock);
359     silc_stack_free(stack);
363   /* Timeout freelist garbage collection */
364   silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
370 /* Uninitializes the schedule. This is called when the program is ready
371 to end. This removes all tasks and task queues. Returns FALSE if the
372 scheduler could not be uninitialized. This happens when the scheduler
373 is still valid and silc_schedule_stop has not been called. */
375 SilcBool silc_schedule_uninit(SilcSchedule schedule)
379   SILC_LOG_DEBUG(("Uninitializing scheduler %p", schedule));
/* Refuse to tear down a running scheduler (see header comment); the
   early-return body is on a line not visible in this chunk. */
381   if (schedule->valid == TRUE)
384   /* Dispatch all timeouts before going away */
385   SILC_SCHEDULE_LOCK(schedule);
386   silc_schedule_dispatch_timeout(schedule, TRUE);
387   SILC_SCHEDULE_UNLOCK(schedule);
389   /* Deliver signals before going away */
390   if (schedule->signal_tasks) {
391     schedule_ops.signals_call(schedule, schedule->internal);
392     schedule->signal_tasks = FALSE;
395   /* Unregister all tasks */
/* First invalidate (task_del), then physically remove (task_remove). */
396   silc_schedule_task_del(schedule, SILC_ALL_TASKS);
397   silc_schedule_task_remove(schedule, SILC_ALL_TASKS);
399   /* Delete timeout task freelist */
400   silc_list_start(schedule->free_tasks);
401   while ((task = silc_list_get(schedule->free_tasks)))
404   /* Unregister all task queues */
405   silc_hash_table_free(schedule->fd_queue);
407   /* Uninit the platform specific scheduler. */
408   schedule_ops.uninit(schedule, schedule->internal);
410   silc_mutex_free(schedule->lock);
/* The schedule itself was allocated from this stack, so this frees it. */
411   silc_stack_free(schedule->stack);
416 /* Stops the schedule even if it is not supposed to be stopped yet.
417 After calling this, one should call silc_schedule_uninit (after the
418 silc_schedule has returned). */
/* Clears the valid flag under the scheduler lock; the running scheduler
   loop checks this flag and exits on its next iteration. */
420 void silc_schedule_stop(SilcSchedule schedule)
422   SILC_LOG_DEBUG(("Stopping scheduler"));
423   SILC_SCHEDULE_LOCK(schedule);
424   schedule->valid = FALSE;
425   SILC_SCHEDULE_UNLOCK(schedule);
428 /* Runs the scheduler once and then returns. Must be called locked. */
/* Core scheduler loop body: delivers pending signals, computes the next
   timeout, blocks in the platform select, then dispatches fd and/or
   timeout tasks. Loops forever when timeout_usecs == -1 (see the
   while condition at the end); otherwise performs a single iteration. */
430 static SilcBool silc_schedule_iterate(SilcSchedule schedule, int timeout_usecs)
432   struct timeval timeout;
436   SILC_LOG_DEBUG(("In scheduler loop"));
438   /* Deliver signals if any has been set to be called */
439   if (silc_unlikely(schedule->signal_tasks)) {
440     SILC_SCHEDULE_UNLOCK(schedule);
441     schedule_ops.signals_call(schedule, schedule->internal);
442     schedule->signal_tasks = FALSE;
443     SILC_SCHEDULE_LOCK(schedule);
446   /* Check if scheduler is valid */
447   if (silc_unlikely(schedule->valid == FALSE)) {
448     SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
452   /* Calculate next timeout for silc_select(). This is the timeout value
453   when at earliest some of the timeout tasks expire. This may dispatch
454   already expired timeouts. */
455   silc_schedule_select_timeout(schedule);
457   /* Check if scheduler is valid */
/* Re-checked because select_timeout may have run task callbacks that
   stopped the scheduler. */
458   if (silc_unlikely(schedule->valid == FALSE)) {
459     SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
/* Caller-supplied timeout overrides the computed task timeout. */
463   if (timeout_usecs >= 0) {
465     timeout.tv_usec = timeout_usecs;
466     schedule->timeout = timeout;
467     schedule->has_timeout = TRUE;
470   /* This is the main silc_select(). The program blocks here until some
471   of the selected file descriptors change status or the selected
473   SILC_LOG_DEBUG(("Select"));
474   ret = schedule_ops.schedule(schedule, schedule->internal);
/* ret == 0: select timed out, run expired timeout tasks. */
476   if (silc_likely(ret == 0)) {
478     SILC_LOG_DEBUG(("Running timeout tasks"));
479     if (silc_likely(silc_list_count(schedule->timeout_queue)))
480       silc_schedule_dispatch_timeout(schedule, FALSE);
483   } else if (silc_likely(ret > 0)) {
484     /* There is some data available now */
485     SILC_LOG_DEBUG(("Running fd tasks"));
486     silc_schedule_dispatch_fd(schedule);
488     /* If timeout was very short, dispatch also timeout tasks */
489     if (schedule->has_timeout && schedule->timeout.tv_sec == 0 &&
490     schedule->timeout.tv_usec < 50000)
491       silc_schedule_dispatch_timeout(schedule, FALSE);
495   /* Error or special case handling */
501   SILC_LOG_ERROR(("Error in select()/poll(): %s", strerror(errno)));
504   } while (timeout_usecs == -1);
509 /* Runs the scheduler once and then returns. */
/* Public single-iteration entry point; wraps silc_schedule_iterate with
   the scheduler lock as the iterate function requires. */
511 SilcBool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
514   SILC_SCHEDULE_LOCK(schedule);
515   ret = silc_schedule_iterate(schedule, timeout_usecs);
516   SILC_SCHEDULE_UNLOCK(schedule);
520 /* Runs the scheduler and blocks here. When this returns the scheduler
/* Blocking entry point: timeout_usecs == -1 makes iterate loop until
   silc_schedule_stop clears schedule->valid. */
524 void silc_schedule(SilcSchedule schedule)
526   SILC_LOG_DEBUG(("Running scheduler"));
528   /* Start the scheduler loop */
529   SILC_SCHEDULE_LOCK(schedule);
530   silc_schedule_iterate(schedule, -1);
531   SILC_SCHEDULE_UNLOCK(schedule);
533 #endif /* !SILC_SYMBIAN */
535 /* Wakes up the scheduler. This is used only in multi-threaded
536 environments where threads may add new tasks or remove old tasks
537 from task queues. This is called to wake up the scheduler in the
538 main thread so that it detects the changes in the task queues.
539 If threads support is not compiled in this function has no effect.
540 Implementation of this function is platform specific. */
542 void silc_schedule_wakeup(SilcSchedule schedule)
545   SILC_LOG_DEBUG(("Wakeup scheduler"));
546   SILC_SCHEDULE_LOCK(schedule);
/* Delegates to the platform backend (e.g. a wakeup pipe/event). */
547   schedule_ops.wakeup(schedule, schedule->internal);
548   SILC_SCHEDULE_UNLOCK(schedule);
552 /* Returns the application specific context that was saved into the
553 scheduler in silc_schedule_init function. The context is also
554 returned to application in task callback functions, but this function
555 may be used to get it as well if needed. */
/* Simple accessor; no locking needed for this read-only field. */
557 void *silc_schedule_get_context(SilcSchedule schedule)
559   return schedule->app_context;
562 /* Return the stack of the scheduler */
/* Accessor for the SilcStack the scheduler was allocated from. */
564 SilcStack silc_schedule_get_stack(SilcSchedule schedule)
566   return schedule->stack;
569 /* Set notify callback */
/* Registers a callback invoked on task add/delete events (see the
   schedule->notify calls throughout this file). NOTE(review): the
   fields are set without taking the scheduler lock — presumably the
   caller sets this before starting the scheduler. */
571 void silc_schedule_set_notify(SilcSchedule schedule,
572 SilcTaskNotifyCb notify, void *context)
574   schedule->notify = notify;
575   schedule->notify_context = context;
578 /* Add new task to the scheduler */
/* Adds a timeout, fd, or signal task depending on `type' (the type
   parameter is on a signature line not visible in this chunk).
   Timeout tasks are recycled from the free list and kept sorted by
   expiry; fd tasks live in the fd hash table keyed by unique fd.
   Returns the new SilcTask, or NULL on error. */
580 SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
581 SilcTaskCallback callback, void *context,
582 long seconds, long useconds,
585   SilcTask task = NULL;
587   if (silc_unlikely(!schedule->valid)) {
588     silc_set_errno(SILC_ERR_NOT_VALID);
592   SILC_SCHEDULE_LOCK(schedule);
594   if (silc_likely(type == SILC_TASK_TIMEOUT)) {
595     SilcTaskTimeout tmp, prev, ttask;
/* Reuse an entry from the free list; allocate only when it is empty. */
598     silc_list_start(schedule->free_tasks);
599     ttask = silc_list_get(schedule->free_tasks);
600     if (silc_unlikely(!ttask)) {
601       ttask = silc_calloc(1, sizeof(*ttask));
602       if (silc_unlikely(!ttask))
605       silc_list_del(schedule->free_tasks, ttask);
/* type 1 == timeout task; tested in silc_schedule_task_remove. */
607     ttask->header.type = 1;
608     ttask->header.callback = callback;
609     ttask->header.context = context;
610     ttask->header.valid = TRUE;
/* Absolute expiry = now + (seconds, useconds), usec normalized. */
613     silc_gettimeofday(&ttask->timeout);
614     if ((seconds + useconds) > 0) {
615       ttask->timeout.tv_sec += seconds + (useconds / 1000000L);
616       ttask->timeout.tv_usec += (useconds % 1000000L);
617       if (ttask->timeout.tv_usec >= 1000000L) {
618         ttask->timeout.tv_sec += 1;
619         ttask->timeout.tv_usec -= 1000000L;
623     SILC_LOG_DEBUG(("New timeout task %p: sec=%d, usec=%d", ttask,
626     /* Add task to correct spot so that the first task in the list has
627     the earliest timeout. */
628     list = schedule->timeout_queue;
629     silc_list_start(list);
631     while ((tmp = silc_list_get(list)) != SILC_LIST_END) {
632       /* If we have shorter timeout, we have found our spot */
633       if (silc_compare_timeval(&ttask->timeout, &tmp->timeout) < 0) {
634         silc_list_insert(schedule->timeout_queue, prev, ttask);
/* No earlier spot found: task has the latest expiry, append at tail. */
640       silc_list_add(schedule->timeout_queue, ttask);
642     task = (SilcTask)ttask;
644     /* Call notify callback */
645     if (schedule->notify)
646       schedule->notify(schedule, TRUE, task, FALSE, 0, 0, seconds, useconds,
647       schedule->notify_context);
649   } else if (silc_likely(type == SILC_TASK_FD)) {
652     /* Check if fd is already added */
653     if (silc_unlikely(silc_hash_table_find(schedule->fd_queue,
655     NULL, (void *)&task))) {
659       /* Remove invalid task. We must have unique fd key to hash table. */
660       silc_schedule_task_remove(schedule, task);
663     /* Check max tasks */
664     if (silc_unlikely(schedule->max_tasks > 0 &&
665     silc_hash_table_count(schedule->fd_queue) >=
666     schedule->max_tasks)) {
667       SILC_LOG_WARNING(("Scheduler task limit reached: cannot add new task"));
669       silc_set_errno(SILC_ERR_LIMIT);
673     ftask = silc_calloc(1, sizeof(*ftask));
674     if (silc_unlikely(!ftask)) {
679     SILC_LOG_DEBUG(("New fd task %p fd=%d", ftask, fd));
/* type 0 == fd task; new fd tasks start listening for reads only. */
681     ftask->header.type = 0;
682     ftask->header.callback = callback;
683     ftask->header.context = context;
684     ftask->header.valid = TRUE;
685     ftask->events = SILC_TASK_READ;
688     /* Add task and schedule it */
689     if (!silc_hash_table_add(schedule->fd_queue, SILC_32_TO_PTR(fd), ftask)) {
/* Platform backend refused the fd; undo the hash table insert. */
694     if (!schedule_ops.schedule_fd(schedule, schedule->internal,
695     ftask, ftask->events)) {
696       silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(fd));
701     task = (SilcTask)ftask;
703     /* Call notify callback */
704     if (schedule->notify)
705       schedule->notify(schedule, TRUE, task, TRUE, ftask->fd,
706       SILC_TASK_READ, 0, 0, schedule->notify_context);
708   } else if (silc_unlikely(type == SILC_TASK_SIGNAL)) {
/* Signal tasks are handled entirely by the platform backend. */
709     SILC_SCHEDULE_UNLOCK(schedule);
710     schedule_ops.signal_register(schedule, schedule->internal, fd,
716   SILC_SCHEDULE_UNLOCK(schedule);
719   /* On symbian we wakeup scheduler immediately after adding timeout task
720   in case the task is added outside the scheduler loop (in some active
722   if (task && task->type == 1)
723     silc_schedule_wakeup(schedule);
724 #endif /* SILC_SYMBIAN */
729 /* Invalidates task */
/* Marks the task (or every task, with SILC_ALL_TASKS) invalid; actual
   removal happens later in silc_schedule_task_remove. The valid = FALSE
   assignments are on lines not visible in this chunk. */
731 SilcBool silc_schedule_task_del(SilcSchedule schedule, SilcTask task)
733   if (silc_unlikely(task == SILC_ALL_TASKS)) {
734     SilcHashTableList htl;
736     SILC_LOG_DEBUG(("Unregister all tasks"));
738     SILC_SCHEDULE_LOCK(schedule);
740     /* Delete from fd queue */
741     silc_hash_table_list(schedule->fd_queue, &htl);
742     while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
745       /* Call notify callback */
746       if (schedule->notify)
747         schedule->notify(schedule, FALSE, task, TRUE,
748         ((SilcTaskFd)task)->fd, 0, 0, 0,
749         schedule->notify_context);
751     silc_hash_table_list_reset(&htl);
753     /* Delete from timeout queue */
754     silc_list_start(schedule->timeout_queue);
755     while ((task = (SilcTask)silc_list_get(schedule->timeout_queue))) {
758       /* Call notify callback */
759       if (schedule->notify)
760         schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
761         schedule->notify_context);
764     SILC_SCHEDULE_UNLOCK(schedule);
/* Single-task path: invalidate just this task. */
768   SILC_LOG_DEBUG(("Unregistering task %p", task));
769   SILC_SCHEDULE_LOCK(schedule);
772   /* Call notify callback */
/* !task->type: TRUE for fd tasks (type 0), matching the notify
   signature's "is fd task" flag used elsewhere in this file. */
773   if (schedule->notify)
774     schedule->notify(schedule, FALSE, task, !task->type, 0, 0, 0, 0,
775     schedule->notify_context);
776   SILC_SCHEDULE_UNLOCK(schedule);
781 /* Invalidate task by fd */
/* Looks up the unique fd task and invalidates it; if no fd task exists
   the fd is treated as a registered signal and unregistered via the
   platform backend. Returns FALSE with SILC_ERR_NOT_FOUND otherwise. */
783 SilcBool silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
785   SilcTask task = NULL;
786   SilcBool ret = FALSE;
788   SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
790   SILC_SCHEDULE_LOCK(schedule);
792   /* fd is unique, so there is only one task with this fd in the table */
793   if (silc_likely(silc_hash_table_find(schedule->fd_queue,
794   SILC_32_TO_PTR(fd), NULL,
796     SILC_LOG_DEBUG(("Deleting task %p", task));
799     /* Call notify callback */
800     if (schedule->notify)
801       schedule->notify(schedule, FALSE, task, TRUE, fd, 0, 0, 0,
802       schedule->notify_context);
806   SILC_SCHEDULE_UNLOCK(schedule);
808   /* If it is signal, remove it */
809   if (silc_unlikely(!task)) {
810     schedule_ops.signal_unregister(schedule, schedule->internal, fd);
815   silc_set_errno(SILC_ERR_NOT_FOUND);
820 /* Invalidate task by task callback. */
/* Invalidates every fd and timeout task whose callback matches.
   Returns FALSE with SILC_ERR_NOT_FOUND when nothing matched (the
   invalidation and ret = TRUE lines are not visible in this chunk). */
822 SilcBool silc_schedule_task_del_by_callback(SilcSchedule schedule,
823 SilcTaskCallback callback)
826   SilcHashTableList htl;
828   SilcBool ret = FALSE;
830   SILC_LOG_DEBUG(("Unregister task by callback"));
832   SILC_SCHEDULE_LOCK(schedule);
834   /* Delete from fd queue */
835   silc_hash_table_list(schedule->fd_queue, &htl);
836   while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
837     if (task->callback == callback) {
840       /* Call notify callback */
841       if (schedule->notify)
842         schedule->notify(schedule, FALSE, task, TRUE,
843         ((SilcTaskFd)task)->fd, 0, 0, 0,
844         schedule->notify_context);
848   silc_hash_table_list_reset(&htl);
850   /* Delete from timeout queue */
851   list = schedule->timeout_queue;
852   silc_list_start(list);
853   while ((task = (SilcTask)silc_list_get(list))) {
854     if (task->callback == callback) {
857       /* Call notify callback */
858       if (schedule->notify)
859         schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
860         schedule->notify_context);
865   SILC_SCHEDULE_UNLOCK(schedule);
868   silc_set_errno(SILC_ERR_NOT_FOUND);
873 /* Invalidate task by context. */
/* Same structure as del_by_callback but matches on the task context
   pointer; the `context' parameter is on a signature line not visible
   in this chunk. */
875 SilcBool silc_schedule_task_del_by_context(SilcSchedule schedule,
879   SilcHashTableList htl;
881   SilcBool ret = FALSE;
883   SILC_LOG_DEBUG(("Unregister task by context"));
885   SILC_SCHEDULE_LOCK(schedule);
887   /* Delete from fd queue */
888   silc_hash_table_list(schedule->fd_queue, &htl);
889   while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
890     if (task->context == context) {
893       /* Call notify callback */
894       if (schedule->notify)
895         schedule->notify(schedule, FALSE, task, TRUE,
896         ((SilcTaskFd)task)->fd, 0, 0, 0,
897         schedule->notify_context);
901   silc_hash_table_list_reset(&htl);
903   /* Delete from timeout queue */
904   list = schedule->timeout_queue;
905   silc_list_start(list);
906   while ((task = (SilcTask)silc_list_get(list))) {
907     if (task->context == context) {
910       /* Call notify callback */
911       if (schedule->notify)
912         schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
913         schedule->notify_context);
918   SILC_SCHEDULE_UNLOCK(schedule);
921   silc_set_errno(SILC_ERR_NOT_FOUND);
926 /* Invalidate task by all */
/* Matches fd + callback + context. For a real fd the callback/context
   are ignored (fd is unique); otherwise only the timeout queue is
   scanned. The fd test guarding the early return is on a line not
   visible in this chunk. */
928 SilcBool silc_schedule_task_del_by_all(SilcSchedule schedule, int fd,
929 SilcTaskCallback callback,
934   SilcBool ret = FALSE;
936   SILC_LOG_DEBUG(("Unregister task by fd, callback and context"));
938   /* For fd task, callback and context is irrelevant as fd is unique */
940     return silc_schedule_task_del_by_fd(schedule, fd);
942   SILC_SCHEDULE_LOCK(schedule);
944   /* Delete from timeout queue */
945   list = schedule->timeout_queue;
946   silc_list_start(list);
947   while ((task = (SilcTask)silc_list_get(list))) {
948     if (task->callback == callback && task->context == context) {
951       /* Call notify callback */
952       if (schedule->notify)
953         schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
954         schedule->notify_context);
959   SILC_SCHEDULE_UNLOCK(schedule);
962   silc_set_errno(SILC_ERR_NOT_FOUND);
967 /* Sets a file descriptor to be listened by scheduler. One can call this
968 directly if wanted. This can be called multiple times for one file
969 descriptor to set different iomasks. */
/* Updates the event mask of an existing fd task via the platform
   backend. If `send_events' is TRUE the mask is delivered immediately
   as ready events through the fd dispatcher. */
971 SilcBool silc_schedule_set_listen_fd(SilcSchedule schedule, SilcUInt32 fd,
972 SilcTaskEvent mask, SilcBool send_events)
976   if (silc_unlikely(!schedule->valid)) {
977     silc_set_errno(SILC_ERR_NOT_VALID);
981   SILC_SCHEDULE_LOCK(schedule);
983   if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
984   NULL, (void *)&task)) {
985     if (!schedule_ops.schedule_fd(schedule, schedule->internal, task, mask)) {
986       SILC_SCHEDULE_UNLOCK(schedule);
/* Fake-ready path: synthesize revents and dispatch right away. */
990     if (silc_unlikely(send_events) && mask) {
991       task->revents = mask;
992       silc_schedule_dispatch_fd(schedule);
995     /* Call notify callback */
996     if (schedule->notify)
997       schedule->notify(schedule, TRUE, (SilcTask)task,
998       TRUE, task->fd, mask, 0, 0,
999       schedule->notify_context);
1002   SILC_SCHEDULE_UNLOCK(schedule);
1007 /* Returns the file descriptor's current requested event mask. */
/* Returns 0 when the scheduler is invalid or the fd has no task (the
   fd parameter is on a signature line not visible in this chunk). */
1009 SilcTaskEvent silc_schedule_get_fd_events(SilcSchedule schedule,
1013   SilcTaskEvent event = 0;
1015   if (silc_unlikely(!schedule->valid)) {
1016     silc_set_errno(SILC_ERR_NOT_VALID);
1020   SILC_SCHEDULE_LOCK(schedule);
1021   if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
1022   NULL, (void *)&task))
1023     event = task->events;
1024   SILC_SCHEDULE_UNLOCK(schedule);
1029 /* Removes a file descriptor from listen list. */
/* Convenience wrapper: a zero mask stops all listening on the fd. */
1031 void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
1033   silc_schedule_set_listen_fd(schedule, fd, 0, FALSE);