5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 1998 - 2007 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
23 /************************** Types and definitions ***************************/

/* Platform specific scheduler backend (select/poll/epoll/WSA, etc.),
   defined in the per-platform translation unit, not here. */
25 /* Platform specific implementation */
26 extern const SilcScheduleOps schedule_ops;

/* Forward declarations for the static helpers defined later in this file. */
28 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task);
29 static void silc_schedule_dispatch_fd(SilcSchedule schedule);
30 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
31 SilcBool dispatch_all);
34 /************************ Static utility functions **************************/

36 /* Fd task hash table destructor */

/* NOTE(review): the rest of this function's parameter list and its body are
   elided from this excerpt; presumably it frees the SilcTaskFd stored as the
   hash table value -- confirm against the full source. */
38 static void silc_schedule_fd_destructor(void *key, void *context,
44 /* Executes file descriptor tasks. Invalid tasks are removed here. */

/* NOTE(review): called with the scheduler lock held (it unlocks before
   running callbacks and re-locks afterwards).  Local declarations are elided
   from this excerpt; `task` iterates fd_dispatch and `t` appears to be the
   same task viewed through the generic SilcTask header -- confirm against
   the full source. */
46 static void silc_schedule_dispatch_fd(SilcSchedule schedule)

51 /* The dispatch list includes only valid tasks, and tasks that have
52 something to dispatch. Dispatching is atomic; no matter if another
53 thread invalidates a task when we unlock, we dispatch to completion. */
54 SILC_SCHEDULE_UNLOCK(schedule);
55 silc_list_start(schedule->fd_dispatch);
56 while ((task = silc_list_get(schedule->fd_dispatch))) {

59 /* Is the task ready for reading */
60 if (task->revents & SILC_TASK_READ)
61 t->callback(schedule, schedule->app_context, SILC_TASK_READ,
62 task->fd, t->context);

64 /* Is the task ready for writing */
/* The read callback above may have invalidated the task; hence the
   extra t->valid test before delivering the write event. */
65 if (t->valid && task->revents & SILC_TASK_WRITE)
66 t->callback(schedule, schedule->app_context, SILC_TASK_WRITE,
67 task->fd, t->context);

/* Re-acquire the lock for the removal pass; callers expect the lock
   to be held again on return. */
69 SILC_SCHEDULE_LOCK(schedule);

71 /* Remove invalidated tasks */
72 silc_list_start(schedule->fd_dispatch);
73 while ((task = silc_list_get(schedule->fd_dispatch)))
74 if (silc_unlikely(!task->header.valid))
75 silc_schedule_task_remove(schedule, (SilcTask)task);
78 /* Executes all tasks whose timeout has expired. The task is removed from
79 the task queue after the callback function has returned. Also, invalid
80 tasks are removed here. */

/* NOTE(review): called with the scheduler lock held.  If `dispatch_all` is
   TRUE every queued timeout task is run regardless of its deadline (used at
   uninit).  Some locals and branch bodies are elided from this excerpt. */
82 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
83 SilcBool dispatch_all)

87 struct timeval curtime;

90 SILC_LOG_DEBUG(("Running timeout tasks"));

92 silc_gettimeofday(&curtime);

94 /* First task in the task queue has always the earliest timeout. */
95 silc_list_start(schedule->timeout_queue);
96 task = silc_list_get(schedule->timeout_queue);
97 if (silc_unlikely(!task))

102 /* Remove invalid task */
103 if (silc_unlikely(!t->valid)) {
104 silc_schedule_task_remove(schedule, t);

108 /* Execute the task if the timeout has expired */
/* Queue is sorted by deadline, so the first unexpired task ends the
   loop (unless dispatch_all forces execution). */
109 if (silc_compare_timeval(&task->timeout, &curtime) > 0 && !dispatch_all)

/* Drop the lock while running the application callback so the callback
   may call back into the scheduler. */
113 SILC_SCHEDULE_UNLOCK(schedule);
114 t->callback(schedule, schedule->app_context, SILC_TASK_EXPIRE, 0,
116 SILC_SCHEDULE_LOCK(schedule);

118 /* Remove the expired task */
119 silc_schedule_task_remove(schedule, t);

121 /* Balance when we have lots of small timeouts */
/* Presumably bails out after 40 dispatches so fd tasks are not starved
   by a flood of short timeouts -- the break body is elided; confirm. */
122 if (silc_unlikely((++count) > 40))

124 } while (silc_likely((task = silc_list_get(schedule->timeout_queue))));
127 /* Calculates next timeout. This is the timeout value when at earliest some
128 of the timeout tasks expire. If this is in the past, they will be

/* NOTE(review): called with the scheduler lock held.  Computes the relative
   wait time for the platform select/poll into schedule->timeout and sets
   schedule->has_timeout.  Some branch bodies are elided from this excerpt. */
131 static void silc_schedule_select_timeout(SilcSchedule schedule)

134 SilcTaskTimeout task;
135 struct timeval curtime;
136 SilcBool dispatch = TRUE;

138 /* Get the current time */
139 silc_gettimeofday(&curtime);
140 schedule->has_timeout = FALSE;

142 /* First task in the task queue has always the earliest timeout. */
143 silc_list_start(schedule->timeout_queue);
144 task = silc_list_get(schedule->timeout_queue);
145 if (silc_unlikely(!task))

150 /* Remove invalid task */
151 if (silc_unlikely(!t->valid)) {
152 silc_schedule_task_remove(schedule, t);

156 /* If the timeout is in past, we will run the task and all other
157 timeout tasks from the past. */
/* `dispatch` guards against dispatching more than once per call;
   presumably it is cleared after this branch -- elided, confirm. */
158 if (silc_compare_timeval(&task->timeout, &curtime) <= 0 && dispatch) {
159 silc_schedule_dispatch_timeout(schedule, FALSE);
160 if (silc_unlikely(!schedule->valid))

163 /* Start selecting new timeout again after dispatch */
164 silc_list_start(schedule->timeout_queue);

169 /* Calculate the next timeout */
/* Convert the absolute deadline to a delta from now, reusing curtime. */
170 curtime.tv_sec = task->timeout.tv_sec - curtime.tv_sec;
171 curtime.tv_usec = task->timeout.tv_usec - curtime.tv_usec;
172 if (curtime.tv_sec < 0)

175 /* We wouldn't want to go under zero, check for it. */
/* Borrow one second when the microsecond subtraction underflowed. */
176 if (curtime.tv_usec < 0) {
178 if (curtime.tv_sec < 0)
180 curtime.tv_usec += 1000000L;

183 } while ((task = silc_list_get(schedule->timeout_queue)));

185 /* Save the timeout */
187 schedule->timeout = curtime;
188 schedule->has_timeout = TRUE;
189 SILC_LOG_DEBUG(("timeout: sec=%d, usec=%d", schedule->timeout.tv_sec,
190 schedule->timeout.tv_usec));
194 /* Removes task from the scheduler. This must be called with scheduler

/* NOTE(review): the tail of the comment above is elided; presumably it says
   "locked", which matches every call site in this file. */
197 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task)

/* SILC_ALL_TASKS is a sentinel: wipe both queues entirely. */
201 if (silc_unlikely(task == SILC_ALL_TASKS)) {
203 SilcHashTableList htl;

206 /* Delete from fd queue */
207 silc_hash_table_list(schedule->fd_queue, &htl);
208 while (silc_hash_table_get(&htl, &fd, (void *)&task))
209 silc_hash_table_del(schedule->fd_queue, fd);
210 silc_hash_table_list_reset(&htl);

212 /* Delete from timeout queue */
213 silc_list_start(schedule->timeout_queue);
214 while ((task = silc_list_get(schedule->timeout_queue))) {
215 silc_list_del(schedule->timeout_queue, task);

/* Single-task removal: type 1 marks a timeout task (see task_add),
   anything else is an fd task keyed by its descriptor. */
222 if (silc_likely(task->type == 1)) {
223 /* Delete from timeout queue */
224 silc_list_del(schedule->timeout_queue, task);

226 /* Put to free list */
/* Timeout tasks are recycled via free_tasks instead of freed; the
   garbage collector below trims that list. */
227 silc_list_add(schedule->free_tasks, task);

229 /* Delete from fd queue */
230 ftask = (SilcTaskFd)task;
231 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(ftask->fd));
235 /* Timeout freelist garbage collection */

/* Periodic task that trims the recycled-timeout-task freelist.  It frees
   roughly half of the freelist, but never below 10 entries and never more
   than the excess over the live timeout queue, then re-arms itself.
   (Re-arm intervals are on elided lines.) */
237 SILC_TASK_CALLBACK(silc_schedule_timeout_gc)

242 if (!schedule->valid)

245 SILC_LOG_DEBUG(("Timeout freelist garbage collection"));

247 SILC_SCHEDULE_LOCK(schedule);

/* Keep a minimum pool of 10 recycled tasks; nothing to collect below that. */
249 if (silc_list_count(schedule->free_tasks) <= 10) {
250 SILC_SCHEDULE_UNLOCK(schedule);
251 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,

/* If more tasks are live than recycled, the pool is not oversized. */
255 if (silc_list_count(schedule->timeout_queue) >
256 silc_list_count(schedule->free_tasks)) {
257 SILC_SCHEDULE_UNLOCK(schedule);
258 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,

/* Free half the pool, clamped so that at least max(live, 10) remain. */
263 c = silc_list_count(schedule->free_tasks) / 2;
264 if (c > silc_list_count(schedule->timeout_queue))
265 c = (silc_list_count(schedule->free_tasks) -
266 silc_list_count(schedule->timeout_queue));
267 if (silc_list_count(schedule->free_tasks) - c < 10)
268 c -= (10 - (silc_list_count(schedule->free_tasks) - c));

270 SILC_LOG_DEBUG(("Freeing %d unused tasks, leaving %d", c,
271 silc_list_count(schedule->free_tasks) - c));

273 silc_list_start(schedule->free_tasks);
274 while ((t = silc_list_get(schedule->free_tasks)) && c-- > 0) {
275 silc_list_del(schedule->free_tasks, t);

278 silc_list_start(schedule->free_tasks);

280 SILC_SCHEDULE_UNLOCK(schedule);

282 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
286 #ifdef SILC_DIST_INPLACE
287 /* Print schedule statistics to stdout */

/* Debug-build-only helper: dumps task counts and their approximate memory
   footprint.  Compiled only for in-place (development) distributions. */
289 void silc_schedule_stats(SilcSchedule schedule)

292 fprintf(stdout, "Schedule %p statistics:\n\n", schedule);
293 fprintf(stdout, "Num FD tasks : %d (%lu bytes allocated)\n",
294 silc_hash_table_count(schedule->fd_queue),
295 sizeof(*ftask) * silc_hash_table_count(schedule->fd_queue));
296 fprintf(stdout, "Num Timeout tasks : %d (%lu bytes allocated)\n",
297 silc_list_count(schedule->timeout_queue),
298 sizeof(struct SilcTaskTimeoutStruct) *
299 silc_list_count(schedule->timeout_queue));
300 fprintf(stdout, "Num Timeout freelist : %d (%lu bytes allocated)\n",
301 silc_list_count(schedule->free_tasks),
302 sizeof(struct SilcTaskTimeoutStruct) *
303 silc_list_count(schedule->free_tasks));
305 #endif /* SILC_DIST_INPLACE */
307 /****************************** Public API **********************************/

309 /* Initializes the scheduler. This returns the scheduler context that
310 is given as argument usually to all silc_schedule_* functions.
311 The `max_tasks' indicates the number of maximum tasks that the
312 scheduler can handle. The `app_context' is application specific
313 context that is delivered to task callbacks. */

/* NOTE(review): error-return lines are elided from this excerpt; on any
   failure path the partially built state is torn down before returning
   (hash table, mutex, stack), as the visible cleanup calls show. */
315 SilcSchedule silc_schedule_init(int max_tasks, void *app_context,

318 SilcSchedule schedule;

320 /* Initialize Tls, in case it hasn't been done yet */
321 silc_thread_tls_init();

/* The scheduler and its allocations live on this SilcStack. */
323 stack = silc_stack_alloc(0, stack);

327 /* Allocate scheduler from the stack */
328 schedule = silc_scalloc(stack, 1, sizeof(*schedule));

332 SILC_LOG_DEBUG(("Initializing scheduler %p", schedule));

334 /* Allocate Fd task hash table dynamically */
336 silc_hash_table_alloc(NULL, 0, silc_hash_uint, NULL, NULL, NULL,
337 silc_schedule_fd_destructor, NULL, TRUE);
338 if (!schedule->fd_queue) {
339 silc_stack_free(stack);

343 silc_list_init(schedule->timeout_queue, struct SilcTaskStruct, next);
344 silc_list_init(schedule->free_tasks, struct SilcTaskStruct, next);

346 schedule->stack = stack;
347 schedule->app_context = app_context;
348 schedule->valid = TRUE;
349 schedule->max_tasks = max_tasks;

351 /* Allocate scheduler lock */
352 silc_mutex_alloc(&schedule->lock);

354 /* Initialize the platform specific scheduler. */
355 schedule->internal = schedule_ops.init(schedule, app_context);
356 if (!schedule->internal) {
357 silc_hash_table_free(schedule->fd_queue);
358 silc_mutex_free(schedule->lock);
359 silc_stack_free(stack);

363 /* Timeout freelist garbage collection */
/* Arm the recurring freelist GC task (interval on an elided line). */
364 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
370 /* Uninitializes the schedule. This is called when the program is ready
371 to end. This removes all tasks and task queues. Returns FALSE if the
372 scheduler could not be uninitialized. This happens when the scheduler
373 is still valid and silc_schedule_stop has not been called. */

375 SilcBool silc_schedule_uninit(SilcSchedule schedule)

379 SILC_VERIFY(schedule);

381 SILC_LOG_DEBUG(("Uninitializing scheduler %p", schedule));

/* Refuse to uninit a still-running scheduler; caller must stop it first.
   (The return on the elided next line presumably returns FALSE.) */
383 if (schedule->valid == TRUE)

386 /* Dispatch all timeouts before going away */
387 SILC_SCHEDULE_LOCK(schedule);
388 silc_schedule_dispatch_timeout(schedule, TRUE);
389 SILC_SCHEDULE_UNLOCK(schedule);

391 /* Deliver signals before going away */
392 if (schedule->signal_tasks) {
393 schedule_ops.signals_call(schedule, schedule->internal);
394 schedule->signal_tasks = FALSE;

397 /* Unregister all tasks */
/* First invalidate (del), then physically remove; remove requires that
   tasks are already invalidated or uses SILC_ALL_TASKS wipe. */
398 silc_schedule_task_del(schedule, SILC_ALL_TASKS);
399 silc_schedule_task_remove(schedule, SILC_ALL_TASKS);

401 /* Delete timeout task freelist */
402 silc_list_start(schedule->free_tasks);
403 while ((task = silc_list_get(schedule->free_tasks)))

406 /* Unregister all task queues */
407 silc_hash_table_free(schedule->fd_queue);

409 /* Uninit the platform specific scheduler. */
410 schedule_ops.uninit(schedule, schedule->internal);

412 silc_mutex_free(schedule->lock);
/* The schedule itself was allocated from this stack, so this frees it too. */
413 silc_stack_free(schedule->stack);
418 /* Stops the schedule even if it is not supposed to be stopped yet.
419 After calling this, one should call silc_schedule_uninit (after the
420 silc_schedule has returned). */

/* Only flips the valid flag under the lock; the running scheduler loop
   notices it on its next iteration and exits. */
422 void silc_schedule_stop(SilcSchedule schedule)

424 SILC_LOG_DEBUG(("Stopping scheduler"));
425 SILC_VERIFY(schedule);
426 SILC_SCHEDULE_LOCK(schedule);
427 schedule->valid = FALSE;
428 SILC_SCHEDULE_UNLOCK(schedule);
431 /* Runs the scheduler once and then returns. Must be called locked. */

/* Core loop shared by silc_schedule() and silc_schedule_one().  When
   timeout_usecs == -1 it loops until the scheduler is stopped; otherwise it
   performs one pass with the given select timeout.  Several lines (returns,
   the retry/EINTR handling) are elided from this excerpt. */
433 static SilcBool silc_schedule_iterate(SilcSchedule schedule, int timeout_usecs)

435 struct timeval timeout;

439 SILC_LOG_DEBUG(("In scheduler loop"));

441 /* Deliver signals if any has been set to be called */
442 if (silc_unlikely(schedule->signal_tasks)) {
443 SILC_SCHEDULE_UNLOCK(schedule);
444 schedule_ops.signals_call(schedule, schedule->internal);
445 schedule->signal_tasks = FALSE;
446 SILC_SCHEDULE_LOCK(schedule);

449 /* Check if scheduler is valid */
450 if (silc_unlikely(schedule->valid == FALSE)) {
451 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));

455 /* Calculate next timeout for silc_select(). This is the timeout value
456 when at earliest some of the timeout tasks expire. This may dispatch
457 already expired timeouts. */
458 silc_schedule_select_timeout(schedule);

460 /* Check if scheduler is valid */
/* Re-check: a dispatched timeout callback may have stopped the scheduler. */
461 if (silc_unlikely(schedule->valid == FALSE)) {
462 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));

/* Caller-supplied timeout overrides the computed one for a single pass. */
466 if (timeout_usecs >= 0) {
468 timeout.tv_usec = timeout_usecs;
469 schedule->timeout = timeout;
470 schedule->has_timeout = TRUE;

473 /* This is the main silc_select(). The program blocks here until some
474 of the selected file descriptors change status or the selected
476 SILC_LOG_DEBUG(("Select"));
477 ret = schedule_ops.schedule(schedule, schedule->internal);

/* ret == 0: timed out with no fd activity; run due timeout tasks. */
479 if (silc_likely(ret == 0)) {
481 SILC_LOG_DEBUG(("Running timeout tasks"));
482 if (silc_likely(silc_list_count(schedule->timeout_queue)))
483 silc_schedule_dispatch_timeout(schedule, FALSE);

486 } else if (silc_likely(ret > 0)) {
487 /* There is some data available now */
488 SILC_LOG_DEBUG(("Running fd tasks"));
489 silc_schedule_dispatch_fd(schedule);

491 /* If timeout was very short, dispatch also timeout tasks */
492 if (schedule->has_timeout && schedule->timeout.tv_sec == 0 &&
493 schedule->timeout.tv_usec < 50000)
494 silc_schedule_dispatch_timeout(schedule, FALSE);

498 /* Error or special case handling */
504 SILC_LOG_ERROR(("Error in select()/poll(): %s", strerror(errno)));

507 } while (timeout_usecs == -1);
512 /* Runs the scheduler once and then returns. */

/* Public single-iteration entry point: takes the lock around one pass of
   the core iterate loop. */
514 SilcBool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)

517 SILC_SCHEDULE_LOCK(schedule);
518 ret = silc_schedule_iterate(schedule, timeout_usecs);
519 SILC_SCHEDULE_UNLOCK(schedule);
523 /* Runs the scheduler and blocks here. When this returns the scheduler

/* Blocking entry point: timeout_usecs == -1 makes the iterate loop run
   until silc_schedule_stop() clears schedule->valid. */
527 void silc_schedule(SilcSchedule schedule)

529 SILC_LOG_DEBUG(("Running scheduler"));

531 /* Start the scheduler loop */
532 SILC_SCHEDULE_LOCK(schedule);
533 silc_schedule_iterate(schedule, -1);
534 SILC_SCHEDULE_UNLOCK(schedule);
536 #endif /* !SILC_SYMBIAN */
538 /* Wakes up the scheduler. This is used only in multi-threaded
539 environments where threads may add new tasks or remove old tasks
540 from task queues. This is called to wake up the scheduler in the
541 main thread so that it detects the changes in the task queues.
542 If threads support is not compiled in this function has no effect.
543 Implementation of this function is platform specific. */

545 void silc_schedule_wakeup(SilcSchedule schedule)

548 SILC_LOG_DEBUG(("Wakeup scheduler"));
549 SILC_SCHEDULE_LOCK(schedule);
550 schedule_ops.wakeup(schedule, schedule->internal);
551 SILC_SCHEDULE_UNLOCK(schedule);
555 /* Returns the application specific context that was saved into the
556 scheduler in silc_schedule_init function. The context is also
557 returned to application in task callback functions, but this function
558 may be used to get it as well if needed. */

560 void *silc_schedule_get_context(SilcSchedule schedule)

562 return schedule->app_context;
565 /* Return the stack of the scheduler */

567 SilcStack silc_schedule_get_stack(SilcSchedule schedule)

569 return schedule->stack;
572 /* Set notify callback */

/* Registers a callback invoked on task add/delete events throughout this
   file (see the schedule->notify call sites); `context` is passed back to
   it as notify_context. */
574 void silc_schedule_set_notify(SilcSchedule schedule,
575 SilcTaskNotifyCb notify, void *context)

577 schedule->notify = notify;
578 schedule->notify_context = context;
581 /* Set global scheduler */

/* Stores the scheduler in thread-local storage so silc_schedule_* calls
   with a NULL schedule can fall back to it (see silc_schedule_get_global). */
583 void silc_schedule_set_global(SilcSchedule schedule)

585 SilcTls tls = silc_thread_get_tls();

588 /* Try to initialize Tls */
589 tls = silc_thread_tls_init();

595 SILC_LOG_DEBUG(("Setting global scheduler %p", schedule));

597 tls->schedule = schedule;
600 /* Return global scheduler */

/* Returns the per-thread scheduler set with silc_schedule_set_global.
   (A NULL-TLS early return is on elided lines.) */
602 SilcSchedule silc_schedule_get_global(void)

604 SilcTls tls = silc_thread_get_tls();

609 SILC_LOG_DEBUG(("Return global scheduler %p", tls->schedule));

611 return tls->schedule;
614 /* Add new task to the scheduler */

/* NOTE(review): several lines (declarations, returns, some closing braces)
   are elided from this excerpt.  Adds a timeout, fd, or signal task
   depending on `type`.  If `schedule` is NULL the thread-global scheduler
   is used. */
616 SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
617 SilcTaskCallback callback, void *context,
618 long seconds, long useconds,

621 SilcTask task = NULL;

/* Fall back to the thread-global scheduler when none was given. */
624 schedule = silc_schedule_get_global();
625 SILC_VERIFY(schedule);
627 silc_set_errno(SILC_ERR_INVALID_ARGUMENT);

632 if (silc_unlikely(!schedule->valid)) {
633 silc_set_errno(SILC_ERR_NOT_VALID);

637 SILC_SCHEDULE_LOCK(schedule);

639 if (silc_likely(type == SILC_TASK_TIMEOUT)) {
640 SilcTaskTimeout tmp, prev, ttask;

/* Reuse a recycled task from the freelist before allocating a new one. */
643 silc_list_start(schedule->free_tasks);
644 ttask = silc_list_get(schedule->free_tasks);
645 if (silc_unlikely(!ttask)) {
646 ttask = silc_calloc(1, sizeof(*ttask));
647 if (silc_unlikely(!ttask))
650 silc_list_del(schedule->free_tasks, ttask);

/* type 1 == timeout task; task_remove relies on this tag. */
652 ttask->header.type = 1;
653 ttask->header.callback = callback;
654 ttask->header.context = context;
655 ttask->header.valid = TRUE;

/* Compute the absolute deadline and normalize tv_usec into [0, 1e6). */
658 silc_gettimeofday(&ttask->timeout);
659 if ((seconds + useconds) > 0) {
660 ttask->timeout.tv_sec += seconds + (useconds / 1000000L);
661 ttask->timeout.tv_usec += (useconds % 1000000L);
662 if (ttask->timeout.tv_usec >= 1000000L) {
663 ttask->timeout.tv_sec += 1;
664 ttask->timeout.tv_usec -= 1000000L;

668 SILC_LOG_DEBUG(("New timeout task %p: sec=%d, usec=%d", ttask,

671 /* Add task to correct spot so that the first task in the list has
672 the earliest timeout. */
673 list = schedule->timeout_queue;
674 silc_list_start(list);
676 while ((tmp = silc_list_get(list)) != SILC_LIST_END) {
677 /* If we have shorter timeout, we have found our spot */
678 if (silc_compare_timeval(&ttask->timeout, &tmp->timeout) < 0) {
679 silc_list_insert(schedule->timeout_queue, prev, ttask);

/* No earlier deadline found: append at the tail. */
685 silc_list_add(schedule->timeout_queue, ttask);

687 task = (SilcTask)ttask;

689 /* Call notify callback */
690 if (schedule->notify)
691 schedule->notify(schedule, TRUE, task, FALSE, 0, 0, seconds, useconds,
692 schedule->notify_context);

694 } else if (silc_likely(type == SILC_TASK_FD)) {

697 /* Check if fd is already added */
698 if (silc_unlikely(silc_hash_table_find(schedule->fd_queue,
700 NULL, (void *)&task))) {

704 /* Remove invalid task. We must have unique fd key to hash table. */
705 silc_schedule_task_remove(schedule, task);

708 /* Check max tasks */
709 if (silc_unlikely(schedule->max_tasks > 0 &&
710 silc_hash_table_count(schedule->fd_queue) >=
711 schedule->max_tasks)) {
712 SILC_LOG_WARNING(("Scheduler task limit reached: cannot add new task"));
714 silc_set_errno(SILC_ERR_LIMIT);

718 ftask = silc_calloc(1, sizeof(*ftask));
719 if (silc_unlikely(!ftask)) {

724 SILC_LOG_DEBUG(("New fd task %p fd=%d", ftask, fd));

/* type 0 == fd task; new fd tasks start listening for reads only. */
726 ftask->header.type = 0;
727 ftask->header.callback = callback;
728 ftask->header.context = context;
729 ftask->header.valid = TRUE;
730 ftask->events = SILC_TASK_READ;

733 /* Add task and schedule it */
734 if (!silc_hash_table_add(schedule->fd_queue, SILC_32_TO_PTR(fd), ftask)) {

/* Roll back the hash table entry if the platform backend rejects the fd. */
739 if (!schedule_ops.schedule_fd(schedule, schedule->internal,
740 ftask, ftask->events)) {
741 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(fd));

746 task = (SilcTask)ftask;

748 /* Call notify callback */
749 if (schedule->notify)
750 schedule->notify(schedule, TRUE, task, TRUE, ftask->fd,
751 SILC_TASK_READ, 0, 0, schedule->notify_context);

753 } else if (silc_unlikely(type == SILC_TASK_SIGNAL)) {
/* Signal registration is delegated to the platform backend, outside
   the scheduler lock. */
754 SILC_SCHEDULE_UNLOCK(schedule);
755 schedule_ops.signal_register(schedule, schedule->internal, fd,

761 SILC_SCHEDULE_UNLOCK(schedule);

764 /* On symbian we wakeup scheduler immediately after adding timeout task
765 in case the task is added outside the scheduler loop (in some active
767 if (task && task->type == 1)
768 silc_schedule_wakeup(schedule);
769 #endif /* SILC_SYMBIAN */
774 /* Invalidates task */

/* Marks the task (or all tasks, with SILC_ALL_TASKS) invalid so the
   dispatch/removal passes reap it later; the actual `valid = FALSE`
   assignments are on elided lines.  If `schedule` is NULL the thread-global
   scheduler is used. */
776 SilcBool silc_schedule_task_del(SilcSchedule schedule, SilcTask task)

779 schedule = silc_schedule_get_global();
780 SILC_VERIFY(schedule);
782 silc_set_errno(SILC_ERR_INVALID_ARGUMENT);

787 if (silc_unlikely(task == SILC_ALL_TASKS)) {
788 SilcHashTableList htl;

790 SILC_LOG_DEBUG(("Unregister all tasks"));

792 SILC_SCHEDULE_LOCK(schedule);

794 /* Delete from fd queue */
795 silc_hash_table_list(schedule->fd_queue, &htl);
796 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {

799 /* Call notify callback */
800 if (schedule->notify)
801 schedule->notify(schedule, FALSE, task, TRUE,
802 ((SilcTaskFd)task)->fd, 0, 0, 0,
803 schedule->notify_context);
805 silc_hash_table_list_reset(&htl);

807 /* Delete from timeout queue */
808 silc_list_start(schedule->timeout_queue);
809 while ((task = (SilcTask)silc_list_get(schedule->timeout_queue))) {

812 /* Call notify callback */
813 if (schedule->notify)
814 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
815 schedule->notify_context);

818 SILC_SCHEDULE_UNLOCK(schedule);

/* Single-task invalidation path. */
822 SILC_LOG_DEBUG(("Unregistering task %p", task));
823 SILC_SCHEDULE_LOCK(schedule);

826 /* Call notify callback */
827 if (schedule->notify)
828 schedule->notify(schedule, FALSE, task, !task->type, 0, 0, 0, 0,
829 schedule->notify_context);
830 SILC_SCHEDULE_UNLOCK(schedule);
835 /* Invalidate task by fd */

/* Invalidates the (unique) fd task keyed by `fd`; if no fd task exists the
   fd is treated as a registered signal and unregistered instead.  Some
   lines (the `valid = FALSE` assignment, returns) are elided. */
837 SilcBool silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)

839 SilcTask task = NULL;
840 SilcBool ret = FALSE;

842 SILC_LOG_DEBUG(("Unregister task by fd %d", fd));

845 schedule = silc_schedule_get_global();
846 SILC_VERIFY(schedule);
848 silc_set_errno(SILC_ERR_INVALID_ARGUMENT);

853 SILC_SCHEDULE_LOCK(schedule);

855 /* fd is unique, so there is only one task with this fd in the table */
856 if (silc_likely(silc_hash_table_find(schedule->fd_queue,
857 SILC_32_TO_PTR(fd), NULL,
859 SILC_LOG_DEBUG(("Deleting task %p", task));

862 /* Call notify callback */
863 if (schedule->notify)
864 schedule->notify(schedule, FALSE, task, TRUE, fd, 0, 0, 0,
865 schedule->notify_context);

869 SILC_SCHEDULE_UNLOCK(schedule);

871 /* If it is signal, remove it */
872 if (silc_unlikely(!task)) {
873 schedule_ops.signal_unregister(schedule, schedule->internal, fd);

878 silc_set_errno(SILC_ERR_NOT_FOUND);
883 /* Invalidate task by task callback. */

/* Invalidates every fd and timeout task whose callback matches; the
   `valid = FALSE` assignments are on elided lines. */
885 SilcBool silc_schedule_task_del_by_callback(SilcSchedule schedule,
886 SilcTaskCallback callback)

889 SilcHashTableList htl;
891 SilcBool ret = FALSE;

893 SILC_LOG_DEBUG(("Unregister task by callback"));

896 schedule = silc_schedule_get_global();
897 SILC_VERIFY(schedule);
899 silc_set_errno(SILC_ERR_INVALID_ARGUMENT);

904 SILC_SCHEDULE_LOCK(schedule);

906 /* Delete from fd queue */
907 silc_hash_table_list(schedule->fd_queue, &htl);
908 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
909 if (task->callback == callback) {

912 /* Call notify callback */
913 if (schedule->notify)
914 schedule->notify(schedule, FALSE, task, TRUE,
915 ((SilcTaskFd)task)->fd, 0, 0, 0,
916 schedule->notify_context);
920 silc_hash_table_list_reset(&htl);

922 /* Delete from timeout queue */
923 list = schedule->timeout_queue;
924 silc_list_start(list);
925 while ((task = (SilcTask)silc_list_get(list))) {
926 if (task->callback == callback) {

929 /* Call notify callback */
930 if (schedule->notify)
931 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
932 schedule->notify_context);

937 SILC_SCHEDULE_UNLOCK(schedule);

940 silc_set_errno(SILC_ERR_NOT_FOUND);
945 /* Invalidate task by context. */

/* Same sweep as del_by_callback, but matching on the task context pointer;
   the `valid = FALSE` assignments are on elided lines. */
947 SilcBool silc_schedule_task_del_by_context(SilcSchedule schedule,

951 SilcHashTableList htl;
953 SilcBool ret = FALSE;

955 SILC_LOG_DEBUG(("Unregister task by context"));

958 schedule = silc_schedule_get_global();
959 SILC_VERIFY(schedule);
961 silc_set_errno(SILC_ERR_INVALID_ARGUMENT);

966 SILC_SCHEDULE_LOCK(schedule);

968 /* Delete from fd queue */
969 silc_hash_table_list(schedule->fd_queue, &htl);
970 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
971 if (task->context == context) {

974 /* Call notify callback */
975 if (schedule->notify)
976 schedule->notify(schedule, FALSE, task, TRUE,
977 ((SilcTaskFd)task)->fd, 0, 0, 0,
978 schedule->notify_context);
982 silc_hash_table_list_reset(&htl);

984 /* Delete from timeout queue */
985 list = schedule->timeout_queue;
986 silc_list_start(list);
987 while ((task = (SilcTask)silc_list_get(list))) {
988 if (task->context == context) {

991 /* Call notify callback */
992 if (schedule->notify)
993 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
994 schedule->notify_context);

999 SILC_SCHEDULE_UNLOCK(schedule);

1002 silc_set_errno(SILC_ERR_NOT_FOUND);
1007 /* Invalidate task by all */

/* Invalidates timeout tasks matching both callback and context; an fd
   argument (condition on an elided line) short-circuits to del_by_fd
   because an fd alone identifies its task. */
1009 SilcBool silc_schedule_task_del_by_all(SilcSchedule schedule, int fd,
1010 SilcTaskCallback callback,

1015 SilcBool ret = FALSE;

1017 SILC_LOG_DEBUG(("Unregister task by fd, callback and context"));

1019 /* For fd task, callback and context is irrelevant as fd is unique */
1021 return silc_schedule_task_del_by_fd(schedule, fd);

1024 schedule = silc_schedule_get_global();
1025 SILC_VERIFY(schedule);
1027 silc_set_errno(SILC_ERR_INVALID_ARGUMENT);

1032 SILC_SCHEDULE_LOCK(schedule);

1034 /* Delete from timeout queue */
1035 list = schedule->timeout_queue;
1036 silc_list_start(list);
1037 while ((task = (SilcTask)silc_list_get(list))) {
1038 if (task->callback == callback && task->context == context) {
1039 task->valid = FALSE;

1041 /* Call notify callback */
1042 if (schedule->notify)
1043 schedule->notify(schedule, FALSE, task, FALSE, 0, 0, 0, 0,
1044 schedule->notify_context);

1049 SILC_SCHEDULE_UNLOCK(schedule);

1052 silc_set_errno(SILC_ERR_NOT_FOUND);
1057 /* Sets a file descriptor to be listened by scheduler. One can call this
1058 directly if wanted. This can be called multiple times for one file
1059 descriptor to set different iomasks. */

/* Updates the event mask of an existing fd task; with `send_events` TRUE
   the mask is also dispatched immediately as if the fd had fired. */
1061 SilcBool silc_schedule_set_listen_fd(SilcSchedule schedule, SilcUInt32 fd,
1062 SilcTaskEvent mask, SilcBool send_events)

1067 schedule = silc_schedule_get_global();
1068 SILC_VERIFY(schedule);
1070 silc_set_errno(SILC_ERR_INVALID_ARGUMENT);

1075 if (silc_unlikely(!schedule->valid)) {
1076 silc_set_errno(SILC_ERR_NOT_VALID);

1080 SILC_SCHEDULE_LOCK(schedule);

1082 if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
1083 NULL, (void *)&task)) {
/* Let the platform backend apply the mask first; bail out on failure
   without touching the task. */
1084 if (!schedule_ops.schedule_fd(schedule, schedule->internal, task, mask)) {
1085 SILC_SCHEDULE_UNLOCK(schedule);

1088 task->events = mask;
1089 if (silc_unlikely(send_events) && mask) {
1090 task->revents = mask;
1091 silc_schedule_dispatch_fd(schedule);

1094 /* Call notify callback */
1095 if (schedule->notify)
1096 schedule->notify(schedule, TRUE, (SilcTask)task,
1097 TRUE, task->fd, mask, 0, 0,
1098 schedule->notify_context);

1101 SILC_SCHEDULE_UNLOCK(schedule);
1106 /* Returns the file descriptor's current requested event mask. */

/* Returns 0 when the fd is unknown or the scheduler is invalid (error
   returns are on elided lines). */
1108 SilcTaskEvent silc_schedule_get_fd_events(SilcSchedule schedule,

1112 SilcTaskEvent event = 0;

1115 schedule = silc_schedule_get_global();
1116 SILC_VERIFY(schedule);
1118 silc_set_errno(SILC_ERR_INVALID_ARGUMENT);

1123 if (silc_unlikely(!schedule->valid)) {
1124 silc_set_errno(SILC_ERR_NOT_VALID);

1128 SILC_SCHEDULE_LOCK(schedule);
1129 if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
1130 NULL, (void *)&task))
1131 event = task->events;
1132 SILC_SCHEDULE_UNLOCK(schedule);
1137 /* Removes a file descriptor from listen list. */

/* Convenience wrapper: an empty event mask stops all listening on fd. */
1139 void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)

1141 silc_schedule_set_listen_fd(schedule, fd, 0, FALSE);