5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 1998 - 2006 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
23 /************************** Types and definitions ***************************/
25 /* Platform specific implementation */
26 extern const SilcScheduleOps schedule_ops;
28 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task);
29 static void silc_schedule_dispatch_fd(SilcSchedule schedule);
30 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
31 SilcBool dispatch_all);
34 /************************ Static utility functions **************************/
36 /* Fd task hash table destructor */
38 static void silc_schedule_fd_destructor(void *key, void *context,
44 /* Executes file descriptor tasks. Invalid tasks are removed here. */
/* Walks the fd task hash table and, for each valid task whose `revents'
   bits are set, invokes the application callback — first for READ
   readiness, then (if the task is still valid) for WRITE readiness.
   The scheduler lock is released around each callback so callbacks may
   re-enter the scheduler API; must be entered with the lock held.
   NOTE(review): several original source lines are missing from this view
   (declarations of `t'/`fd', loop braces); visible code kept byte-identical. */
46 static void silc_schedule_dispatch_fd(SilcSchedule schedule)
48 SilcHashTableList htl;
53 silc_hash_table_list(schedule->fd_queue, &htl);
54 while (silc_likely(silc_hash_table_get(&htl, (void **)&fd,
/* Invalidated tasks are purged as they are encountered. */
58 if (silc_unlikely(!t->valid)) {
59 silc_schedule_task_remove(schedule, t);
/* Skip tasks with no pending events or no interest mask. */
62 if (!task->revents || !task->events)
65 /* Is the task ready for reading */
66 if (task->revents & SILC_TASK_READ) {
67 SILC_SCHEDULE_UNLOCK(schedule);
68 t->callback(schedule, schedule->app_context, SILC_TASK_READ,
69 task->fd, t->context);
70 SILC_SCHEDULE_LOCK(schedule);
73 /* Is the task ready for writing */
/* Re-check validity: the READ callback above may have invalidated it. */
74 if (t->valid && task->revents & SILC_TASK_WRITE) {
75 SILC_SCHEDULE_UNLOCK(schedule);
76 t->callback(schedule, schedule->app_context, SILC_TASK_WRITE,
77 task->fd, t->context);
78 SILC_SCHEDULE_LOCK(schedule);
81 /* Remove if task was invalidated in the task callback */
82 if (silc_unlikely(!t->valid))
83 silc_schedule_task_remove(schedule, t);
85 silc_hash_table_list_reset(&htl);
88 /* Executes all tasks whose timeout has expired. The task is removed from
89 the task queue after the callback function has returned. Also, invalid
90 tasks are removed here. */
/* Must be called with the scheduler lock held; the lock is released
   around each callback invocation.  When `dispatch_all' is TRUE every
   queued timeout task is run regardless of its expiry time (used from
   silc_schedule_uninit).
   NOTE(review): original lines are missing here (declarations of
   `task'/`t'/`count', the `do {' header, early-return paths); visible
   code kept byte-identical. */
92 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
93 SilcBool dispatch_all)
97 struct timeval curtime;
100 SILC_LOG_DEBUG(("Running timeout tasks"));
102 silc_gettimeofday(&curtime);
104 /* First task in the task queue has always the earliest timeout. */
105 silc_list_start(schedule->timeout_queue);
106 task = silc_list_get(schedule->timeout_queue);
107 if (silc_unlikely(!task))
112 /* Remove invalid task */
113 if (silc_unlikely(!t->valid)) {
114 silc_schedule_task_remove(schedule, t);
118 /* Execute the task if the timeout has expired */
/* List is timeout-ordered, so the first unexpired task ends the pass
   (unless dispatch_all forces execution of everything). */
119 if (!silc_compare_timeval(&task->timeout, &curtime) && !dispatch_all)
123 SILC_SCHEDULE_UNLOCK(schedule);
/* Timeout callbacks receive SILC_TASK_EXPIRE and fd 0. */
124 t->callback(schedule, schedule->app_context, SILC_TASK_EXPIRE, 0,
126 SILC_SCHEDULE_LOCK(schedule);
128 /* Remove the expired task */
129 silc_schedule_task_remove(schedule, t);
131 /* Balance when we have lots of small timeouts */
/* Caps one pass at ~40 dispatches — presumably so a flood of short
   timeouts cannot starve fd processing; TODO confirm against full source. */
132 if (silc_unlikely((++count) > 40))
134 } while (silc_likely((task = silc_list_get(schedule->timeout_queue))));
137 /* Calculates next timeout. This is the timeout value when at earliest some
138 of the timeout tasks expire. If this is in the past, they will be
/* Computes schedule->timeout (and sets schedule->has_timeout) as the
   delta from now to the earliest pending timeout task.  Already-expired
   tasks are dispatched inline once per call.  Must be called with the
   scheduler lock held.
   NOTE(review): original lines are missing (the `do {' header, `break'/
   `continue' statements, tv_sec<0 clamping bodies); visible code kept
   byte-identical. */
141 static void silc_schedule_select_timeout(SilcSchedule schedule)
144 SilcTaskTimeout task;
145 struct timeval curtime;
146 SilcBool dispatch = TRUE;
148 /* Get the current time */
149 silc_gettimeofday(&curtime);
/* No timeout until proven otherwise (empty queue => has_timeout FALSE). */
150 schedule->has_timeout = FALSE;
152 /* First task in the task queue has always the earliest timeout. */
153 silc_list_start(schedule->timeout_queue);
154 task = silc_list_get(schedule->timeout_queue);
155 if (silc_unlikely(!task))
160 /* Remove invalid task */
161 if (silc_unlikely(!t->valid)) {
162 silc_schedule_task_remove(schedule, t);
166 /* If the timeout is in past, we will run the task and all other
167 timeout tasks from the past. */
/* `dispatch' guards against dispatching more than once per call. */
168 if (silc_compare_timeval(&task->timeout, &curtime) && dispatch) {
169 silc_schedule_dispatch_timeout(schedule, FALSE);
/* A dispatched callback may have stopped the scheduler. */
170 if (silc_unlikely(!schedule->valid))
173 /* Start selecting new timeout again after dispatch */
174 silc_list_start(schedule->timeout_queue);
179 /* Calculate the next timeout */
/* Relative delta = task->timeout - curtime, normalized below. */
180 curtime.tv_sec = task->timeout.tv_sec - curtime.tv_sec;
181 curtime.tv_usec = task->timeout.tv_usec - curtime.tv_usec;
182 if (curtime.tv_sec < 0)
185 /* We wouldn't want to go under zero, check for it. */
186 if (curtime.tv_usec < 0) {
188 if (curtime.tv_sec < 0)
/* Borrow one second into the microsecond field. */
190 curtime.tv_usec += 1000000L;
193 } while ((task = silc_list_get(schedule->timeout_queue)));
195 /* Save the timeout */
197 schedule->timeout = curtime;
198 schedule->has_timeout = TRUE;
199 SILC_LOG_DEBUG(("timeout: sec=%d, usec=%d", schedule->timeout.tv_sec,
200 schedule->timeout.tv_usec));
204 /* Removes task from the scheduler. This must be called with scheduler
/* Removes `task' from its queue.  SILC_ALL_TASKS empties both the fd
   hash table and the timeout list.  For a single task, header.type == 1
   identifies a timeout task (see silc_schedule_task_add, which sets
   type 1 for timeouts and 0 for fd tasks); timeout tasks are recycled
   onto the free list rather than freed.
   NOTE(review): original lines are missing (declarations of `fd'/`ftask',
   branch braces); visible code kept byte-identical. */
207 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task)
211 if (silc_unlikely(task == SILC_ALL_TASKS)) {
213 SilcHashTableList htl;
216 /* Delete from fd queue */
217 silc_hash_table_list(schedule->fd_queue, &htl);
218 while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task))
219 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(fd));
220 silc_hash_table_list_reset(&htl);
222 /* Delete from timeout queue */
223 silc_list_start(schedule->timeout_queue);
224 while ((task = silc_list_get(schedule->timeout_queue))) {
225 silc_list_del(schedule->timeout_queue, task);
/* type == 1 => timeout task; keep its memory on the free list. */
232 if (silc_likely(task->type == 1)) {
233 /* Delete from timeout queue */
234 silc_list_del(schedule->timeout_queue, task);
236 /* Put to free list */
237 silc_list_add(schedule->free_tasks, task);
239 /* Delete from fd queue */
240 ftask = (SilcTaskFd)task;
/* Hash table destructor frees the fd task context. */
241 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(ftask->fd));
245 /* Timeout freelist garbage collection */
/* Periodic self-rescheduling task that trims the timeout-task free list.
   It keeps a floor of ~10 cached tasks and never shrinks the free list
   below the number of live timeout tasks; otherwise it frees roughly
   half of the cached entries.  Each early-exit path re-arms the task.
   NOTE(review): original lines are missing (declarations of `t'/`c',
   the re-arm argument lists, the silc_free call, return statements);
   visible code kept byte-identical. */
247 SILC_TASK_CALLBACK(silc_schedule_timeout_gc)
252 if (!schedule->valid)
255 SILC_LOG_DEBUG(("Timeout freelist garbage collection"));
257 SILC_SCHEDULE_LOCK(schedule);
/* Small cache: nothing worth collecting, just re-arm. */
259 if (silc_list_count(schedule->free_tasks) <= 10) {
260 SILC_SCHEDULE_UNLOCK(schedule);
261 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* More live timeout tasks than cached ones: the cache will likely be
   reused soon, skip this round. */
265 if (silc_list_count(schedule->timeout_queue) >
266 silc_list_count(schedule->free_tasks)) {
267 SILC_SCHEDULE_UNLOCK(schedule);
268 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* Free half the cache, clamped so at least max(live, 10) entries stay. */
273 c = silc_list_count(schedule->free_tasks) / 2;
274 if (c > silc_list_count(schedule->timeout_queue))
275 c = (silc_list_count(schedule->free_tasks) -
276 silc_list_count(schedule->timeout_queue));
277 if (silc_list_count(schedule->free_tasks) - c < 10)
278 c -= (10 - (silc_list_count(schedule->free_tasks) - c));
280 SILC_LOG_DEBUG(("Freeing %d unused tasks, leaving %d", c,
281 silc_list_count(schedule->free_tasks) - c));
283 silc_list_start(schedule->free_tasks);
284 while ((t = silc_list_get(schedule->free_tasks)) && c-- > 0) {
285 silc_list_del(schedule->free_tasks, t);
288 silc_list_start(schedule->free_tasks);
290 SILC_SCHEDULE_UNLOCK(schedule);
292 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
#ifdef SILC_DIST_INPLACE
297 /* Print schedule statistics to stdout */
/* Debug-build-only (SILC_DIST_INPLACE) dump of task counts and rough
   memory usage for the fd queue, timeout queue and timeout free list.
   NOTE(review): sizeof(...) yields size_t; the %lu/%d conversion
   specifiers may mismatch on some platforms (%zu would be portable) —
   left as-is since this is a documentation-only pass. */
299 void silc_schedule_stats(SilcSchedule schedule)
302 fprintf(stdout, "Schedule %p statistics:\n\n", schedule);
303 fprintf(stdout, "Num FD tasks : %lu (%lu bytes allocated)\n",
304 silc_hash_table_count(schedule->fd_queue),
305 sizeof(*ftask) * silc_hash_table_count(schedule->fd_queue));
306 fprintf(stdout, "Num Timeout tasks : %d (%d bytes allocated)\n",
307 silc_list_count(schedule->timeout_queue),
308 sizeof(struct SilcTaskTimeoutStruct) *
309 silc_list_count(schedule->timeout_queue));
310 fprintf(stdout, "Num Timeout freelist : %d (%d bytes allocated)\n",
311 silc_list_count(schedule->free_tasks),
312 sizeof(struct SilcTaskTimeoutStruct) *
313 silc_list_count(schedule->free_tasks));
#endif /* SILC_DIST_INPLACE */
317 /****************************** Public API **********************************/
319 /* Initializes the scheduler. This returns the scheduler context that
320 is given as argument usually to all silc_schedule_* functions.
321 The `max_tasks' indicates the number of maximum tasks that the
322 scheduler can handle. The `app_context' is application specific
323 context that is delivered to task callbacks. */
/* Allocates and initializes a scheduler: fd hash table, timeout and
   free-task lists, scheduler lock, the platform-specific backend
   (schedule_ops.init), and the periodic freelist GC task.
   NOTE(review): original lines are missing (the NULL check after
   silc_calloc, error-path cleanup, the GC task's timeout arguments,
   the return statement); visible code kept byte-identical. */
325 SilcSchedule silc_schedule_init(int max_tasks, void *app_context)
327 SilcSchedule schedule;
329 SILC_LOG_DEBUG(("Initializing scheduler"));
331 schedule = silc_calloc(1, sizeof(*schedule));
/* fd_queue maps fd (as pointer) -> SilcTaskFd; destructor frees tasks. */
336 silc_hash_table_alloc(0, silc_hash_uint, NULL, NULL, NULL,
337 silc_schedule_fd_destructor, NULL, TRUE);
338 if (!schedule->fd_queue)
341 silc_list_init(schedule->timeout_queue, struct SilcTaskTimeoutStruct, next);
342 silc_list_init(schedule->free_tasks, struct SilcTaskTimeoutStruct, next);
344 schedule->app_context = app_context;
345 schedule->valid = TRUE;
/* max_tasks caps the number of fd tasks; <= 0 presumably means
   unlimited (task_add only enforces the cap when max_tasks > 0). */
346 schedule->max_tasks = max_tasks;
348 /* Allocate scheduler lock */
349 silc_mutex_alloc(&schedule->lock);
351 /* Initialize the platform specific scheduler. */
352 schedule->internal = schedule_ops.init(schedule, app_context);
354 /* Timeout freelist garbage collection */
355 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
361 /* Uninitializes the schedule. This is called when the program is ready
362 to end. This removes all tasks and task queues. Returns FALSE if the
363 scheduler could not be uninitialized. This happens when the scheduler
364 is still valid and silc_schedule_stop has not been called. */
/* Tears the scheduler down: refuses while still valid (caller must call
   silc_schedule_stop first), dispatches all remaining timeouts, delivers
   pending signals, removes all tasks, drains the free list, frees the
   fd hash table, uninits the platform backend and frees the lock.
   NOTE(review): original lines are missing (declaration of `task', the
   FALSE/TRUE return statements, silc_free of freelist entries and of
   `schedule' itself); visible code kept byte-identical. */
366 SilcBool silc_schedule_uninit(SilcSchedule schedule)
370 SILC_LOG_DEBUG(("Uninitializing scheduler"));
/* Still running => cannot uninit; presumably returns FALSE here. */
372 if (schedule->valid == TRUE)
375 /* Dispatch all timeouts before going away */
376 SILC_SCHEDULE_LOCK(schedule);
377 silc_schedule_dispatch_timeout(schedule, TRUE);
378 SILC_SCHEDULE_UNLOCK(schedule);
380 /* Deliver signals before going away */
381 if (schedule->signal_tasks) {
382 schedule_ops.signals_call(schedule, schedule->internal);
383 schedule->signal_tasks = FALSE;
386 /* Unregister all tasks */
387 silc_schedule_task_del(schedule, SILC_ALL_TASKS);
388 silc_schedule_task_remove(schedule, SILC_ALL_TASKS);
390 /* Delete timeout task freelist */
391 silc_list_start(schedule->free_tasks);
392 while ((task = silc_list_get(schedule->free_tasks)))
395 /* Unregister all task queues */
396 silc_hash_table_free(schedule->fd_queue);
398 /* Uninit the platform specific scheduler. */
399 schedule_ops.uninit(schedule, schedule->internal);
401 silc_mutex_free(schedule->lock);
407 /* Stops the schedule even if it is not supposed to be stopped yet.
408 After calling this, one should call silc_schedule_uninit (after the
409 silc_schedule has returned). */
/* Clears schedule->valid under the lock; the scheduler loop observes
   the flag and exits on its next iteration. */
411 void silc_schedule_stop(SilcSchedule schedule)
413 SILC_LOG_DEBUG(("Stopping scheduler"));
414 SILC_SCHEDULE_LOCK(schedule);
415 schedule->valid = FALSE;
416 SILC_SCHEDULE_UNLOCK(schedule);
419 /* Runs the scheduler once and then returns. Must be called locked. */
/* Core scheduler loop: deliver pending signals, compute the next select
   timeout (which may dispatch already-expired timeouts), run the
   platform select, then dispatch timeout tasks (ret == 0, i.e. select
   timed out) or fd tasks (ret > 0).  With timeout_usecs >= 0 a single
   bounded iteration is performed; with -1 the loop repeats until the
   scheduler is stopped.  EINTR from select is tolerated.
   NOTE(review): original lines are missing (declaration of `ret', the
   `do {' header, return statements, timeout.tv_sec assignment, the
   error-branch `} else {'); visible code kept byte-identical. */
421 static SilcBool silc_schedule_iterate(SilcSchedule schedule, int timeout_usecs)
423 struct timeval timeout;
427 SILC_LOG_DEBUG(("In scheduler loop"));
429 /* Deliver signals if any has been set to be called */
430 if (silc_unlikely(schedule->signal_tasks)) {
431 SILC_SCHEDULE_UNLOCK(schedule);
432 schedule_ops.signals_call(schedule, schedule->internal);
433 schedule->signal_tasks = FALSE;
434 SILC_SCHEDULE_LOCK(schedule);
437 /* Check if scheduler is valid */
438 if (silc_unlikely(schedule->valid == FALSE)) {
439 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
443 /* Calculate next timeout for silc_select(). This is the timeout value
444 when at earliest some of the timeout tasks expire. This may dispatch
445 already expired timeouts. */
446 silc_schedule_select_timeout(schedule);
448 /* Check if scheduler is valid */
/* Re-check: dispatching expired timeouts may have stopped the scheduler. */
449 if (silc_unlikely(schedule->valid == FALSE)) {
450 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
/* Caller-supplied timeout overrides the computed one for a single run. */
454 if (timeout_usecs >= 0) {
456 timeout.tv_usec = timeout_usecs;
457 schedule->timeout = timeout;
458 schedule->has_timeout = TRUE;
461 /* This is the main silc_select(). The program blocks here until some
462 of the selected file descriptors change status or the selected
464 SILC_LOG_DEBUG(("Select"));
465 ret = schedule_ops.select(schedule, schedule->internal);
/* ret == 0: select timed out => run expired timeout tasks. */
467 if (silc_likely(ret == 0)) {
469 SILC_LOG_DEBUG(("Running timeout tasks"));
470 if (silc_likely(silc_list_count(schedule->timeout_queue)))
471 silc_schedule_dispatch_timeout(schedule, FALSE);
474 } else if (silc_likely(ret > 0)) {
475 /* There is some data available now */
476 SILC_LOG_DEBUG(("Running fd tasks"));
477 silc_schedule_dispatch_fd(schedule);
/* Interrupted select is benign; any other error is logged. */
482 if (silc_likely(errno == EINTR))
484 SILC_LOG_ERROR(("Error in select()/poll(): %s", strerror(errno)));
487 } while (timeout_usecs == -1);
492 /* Runs the scheduler once and then returns. */
/* Single bounded iteration of the scheduler loop; `timeout_usecs' caps
   how long the iteration may block (see silc_schedule_iterate). */
494 SilcBool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
497 SILC_SCHEDULE_LOCK(schedule);
498 ret = silc_schedule_iterate(schedule, timeout_usecs);
499 SILC_SCHEDULE_UNLOCK(schedule);
503 /* Runs the scheduler and blocks here. When this returns the scheduler
/* Blocking scheduler entry point: iterates with timeout_usecs == -1
   until silc_schedule_stop() clears schedule->valid. */
506 void silc_schedule(SilcSchedule schedule)
508 SILC_LOG_DEBUG(("Running scheduler"));
510 /* Start the scheduler loop */
511 SILC_SCHEDULE_LOCK(schedule);
512 silc_schedule_iterate(schedule, -1);
513 SILC_SCHEDULE_UNLOCK(schedule);
516 /* Wakes up the scheduler. This is used only in multi-threaded
517 environments where threads may add new tasks or remove old tasks
518 from task queues. This is called to wake up the scheduler in the
519 main thread so that it detects the changes in the task queues.
520 If threads support is not compiled in this function has no effect.
521 Implementation of this function is platform specific. */
/* Delegates to the platform backend's wakeup operation under the lock. */
523 void silc_schedule_wakeup(SilcSchedule schedule)
526 SILC_LOG_DEBUG(("Wakeup scheduler"));
527 SILC_SCHEDULE_LOCK(schedule);
528 schedule_ops.wakeup(schedule, schedule->internal);
529 SILC_SCHEDULE_UNLOCK(schedule);
533 /* Returns the application specific context that was saved into the
534 scheduler in silc_schedule_init function. The context is also
535 returned to application in task callback functions, but this function
536 may be used to get it as well if needed. */
/* Simple accessor; no locking needed as app_context is set once at init. */
538 void *silc_schedule_get_context(SilcSchedule schedule)
540 return schedule->app_context;
543 /* Add new task to the scheduler */
/* Registers a task of the given `type':
   - SILC_TASK_TIMEOUT: reuses a task from the free list when possible
     (header.type = 1), computes the absolute expiry from now + seconds/
     useconds, and inserts it into the timeout queue keeping the list
     sorted by earliest expiry.
   - SILC_TASK_FD: allocates a new fd task (header.type = 0, initial
     interest SILC_TASK_READ) keyed by `fd' in the hash table, honoring
     the max_tasks cap.
   - SILC_TASK_SIGNAL: delegated to the platform backend's
     signal_register; `fd' carries the signal number in this case.
   Returns the new task, or NULL per the early-out on an invalid
   scheduler.  NOTE(review): original lines are missing (`type'
   parameter declaration, `list' declaration, gotos/breaks, the
   duplicate-fd and OOM branches' bodies, return statement); visible
   code kept byte-identical. */
545 SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
546 SilcTaskCallback callback, void *context,
547 long seconds, long useconds,
550 SilcTask task = NULL;
552 if (silc_unlikely(!schedule->valid))
555 SILC_SCHEDULE_LOCK(schedule);
557 if (silc_likely(type == SILC_TASK_TIMEOUT)) {
558 SilcTaskTimeout tmp, prev, ttask;
/* Prefer recycling a cached task over allocating a fresh one. */
561 silc_list_start(schedule->free_tasks);
562 ttask = silc_list_get(schedule->free_tasks);
563 if (silc_unlikely(!ttask)) {
564 ttask = silc_calloc(1, sizeof(*ttask));
565 if (silc_unlikely(!ttask))
568 silc_list_del(schedule->free_tasks, ttask);
/* type 1 marks a timeout task (checked in silc_schedule_task_remove). */
570 ttask->header.type = 1;
571 ttask->header.callback = callback;
572 ttask->header.context = context;
573 ttask->header.valid = TRUE;
/* Absolute expiry = current time + requested relative timeout,
   with microsecond overflow carried into seconds. */
576 silc_gettimeofday(&ttask->timeout);
577 if ((seconds + useconds) > 0) {
578 ttask->timeout.tv_sec += seconds + (useconds / 1000000L);
579 ttask->timeout.tv_usec += (useconds % 1000000L);
580 if (ttask->timeout.tv_usec >= 1000000L) {
581 ttask->timeout.tv_sec += 1;
582 ttask->timeout.tv_usec -= 1000000L;
586 SILC_LOG_DEBUG(("New timeout task %p: sec=%d, usec=%d", ttask,
589 /* Add task to correct spot so that the first task in the list has
590 the earliest timeout. */
591 list = schedule->timeout_queue;
592 silc_list_start(list);
594 while ((tmp = silc_list_get(list)) != SILC_LIST_END) {
595 /* If we have shorter timeout, we have found our spot */
596 if (silc_compare_timeval(&ttask->timeout, &tmp->timeout)) {
597 silc_list_insert(schedule->timeout_queue, prev, ttask);
/* Fell off the end: this timeout is the latest, append it. */
603 silc_list_add(schedule->timeout_queue, ttask);
605 task = (SilcTask)ttask;
607 } else if (silc_likely(type == SILC_TASK_FD)) {
608 /* Check if fd is already added */
609 if (silc_unlikely(silc_hash_table_find(schedule->fd_queue,
611 NULL, (void **)&task)))
614 /* Check max tasks */
615 if (silc_unlikely(schedule->max_tasks > 0 &&
616 silc_hash_table_count(schedule->fd_queue) >=
617 schedule->max_tasks)) {
618 SILC_LOG_WARNING(("Scheduler task limit reached: cannot add new task"));
622 SilcTaskFd ftask = silc_calloc(1, sizeof(*ftask));
623 if (silc_unlikely(!ftask))
626 SILC_LOG_DEBUG(("New fd task %p fd=%d", ftask, fd));
628 ftask->header.type = 0;
629 ftask->header.callback = callback;
630 ftask->header.context = context;
631 ftask->header.valid = TRUE;
/* New fd tasks start listening for readability only. */
632 ftask->events = SILC_TASK_READ;
636 silc_hash_table_add(schedule->fd_queue, SILC_32_TO_PTR(fd), ftask);
638 task = (SilcTask)ftask;
640 } else if (silc_unlikely(type == SILC_TASK_SIGNAL)) {
641 SILC_SCHEDULE_UNLOCK(schedule);
642 schedule_ops.signal_register(schedule, schedule->internal, fd,
648 SILC_SCHEDULE_UNLOCK(schedule);
652 /* Invalidates task */
/* Marks a task (or, with SILC_ALL_TASKS, every fd and timeout task) as
   invalid.  Invalidated tasks are physically removed later by the
   dispatch/remove paths, so this is safe to call from task callbacks.
   NOTE(review): original lines are missing (the `task->valid = FALSE'
   assignments inside the loops and for the single-task case); visible
   code kept byte-identical. */
654 void silc_schedule_task_del(SilcSchedule schedule, SilcTask task)
656 if (silc_unlikely(task == SILC_ALL_TASKS)) {
657 SilcHashTableList htl;
659 SILC_LOG_DEBUG(("Unregister all tasks"));
661 SILC_SCHEDULE_LOCK(schedule);
663 /* Delete from fd queue */
664 silc_hash_table_list(schedule->fd_queue, &htl);
665 while (silc_hash_table_get(&htl, NULL, (void **)&task))
667 silc_hash_table_list_reset(&htl);
669 /* Delete from timeout queue */
670 silc_list_start(schedule->timeout_queue);
671 while ((task = (SilcTask)silc_list_get(schedule->timeout_queue))
675 SILC_SCHEDULE_UNLOCK(schedule);
679 SILC_LOG_DEBUG(("Unregistering task %p", task));
680 SILC_SCHEDULE_LOCK(schedule);
682 SILC_SCHEDULE_UNLOCK(schedule);
685 /* Invalidate task by fd */
/* Invalidates the single fd task keyed by `fd'.  If no fd task exists,
   the fd is treated as a signal number and unregistered via the
   platform backend.  NOTE(review): the line invalidating the found
   task is missing from this view; visible code kept byte-identical. */
687 void silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
689 SilcTask task = NULL;
691 SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
693 SILC_SCHEDULE_LOCK(schedule);
695 /* fd is unique, so there is only one task with this fd in the table */
696 if (silc_likely(silc_hash_table_find(schedule->fd_queue,
697 SILC_32_TO_PTR(fd), NULL,
701 SILC_SCHEDULE_UNLOCK(schedule);
703 /* If it is signal, remove it */
704 if (silc_unlikely(!task))
705 schedule_ops.signal_unregister(schedule, schedule->internal, fd);
708 /* Invalidate task by task callback. */
/* Invalidates every fd and timeout task whose callback matches
   `callback'.  NOTE(review): the `task->valid = FALSE' lines and the
   `list' declaration are missing from this view; visible code kept
   byte-identical. */
710 void silc_schedule_task_del_by_callback(SilcSchedule schedule,
711 SilcTaskCallback callback)
714 SilcHashTableList htl;
717 SILC_LOG_DEBUG(("Unregister task by callback"));
719 SILC_SCHEDULE_LOCK(schedule);
721 /* Delete from fd queue */
722 silc_hash_table_list(schedule->fd_queue, &htl);
723 while (silc_hash_table_get(&htl, NULL, (void **)&task)) {
724 if (task->callback == callback)
727 silc_hash_table_list_reset(&htl);
729 /* Delete from timeout queue */
730 list = schedule->timeout_queue;
731 silc_list_start(list);
732 while ((task = (SilcTask)silc_list_get(list))) {
733 if (task->callback == callback)
737 SILC_SCHEDULE_UNLOCK(schedule);
740 /* Invalidate task by context. */
/* Invalidates every fd and timeout task whose context pointer matches
   `context'.  Mirrors silc_schedule_task_del_by_callback.
   NOTE(review): the invalidation lines and `list' declaration are
   missing from this view; visible code kept byte-identical. */
742 void silc_schedule_task_del_by_context(SilcSchedule schedule, void *context)
745 SilcHashTableList htl;
748 SILC_LOG_DEBUG(("Unregister task by context"));
750 SILC_SCHEDULE_LOCK(schedule);
752 /* Delete from fd queue */
753 silc_hash_table_list(schedule->fd_queue, &htl);
754 while (silc_hash_table_get(&htl, NULL, (void **)&task)) {
755 if (task->context == context)
758 silc_hash_table_list_reset(&htl);
760 /* Delete from timeout queue */
761 list = schedule->timeout_queue;
762 silc_list_start(list);
763 while ((task = (SilcTask)silc_list_get(list))) {
764 if (task->context == context)
768 SILC_SCHEDULE_UNLOCK(schedule);
771 /* Invalidate task by all */
/* Invalidates the fd task for `fd' (callback/context ignored there since
   fd is unique) and every timeout task matching both `callback' and
   `context'.  NOTE(review): the invalidation line and declarations of
   `task'/`list' are missing from this view; visible code kept
   byte-identical. */
773 void silc_schedule_task_del_by_all(SilcSchedule schedule, int fd,
774 SilcTaskCallback callback, void *context)
779 SILC_LOG_DEBUG(("Unregister task by fd, callback and context"));
781 /* For fd task, callback and context is irrelevant as fd is unique */
783 silc_schedule_task_del_by_fd(schedule, fd);
785 SILC_SCHEDULE_LOCK(schedule);
787 /* Delete from timeout queue */
788 list = schedule->timeout_queue;
789 silc_list_start(list);
790 while ((task = (SilcTask)silc_list_get(list))) {
791 if (task->callback == callback && task->context == context)
795 SILC_SCHEDULE_UNLOCK(schedule);
798 /* Sets a file descriptor to be listened by scheduler. One can call this
799 directly if wanted. This can be called multiple times for one file
800 descriptor to set different iomasks. */
/* Updates the event interest mask of the fd task keyed by `fd'.  When
   `send_events' is TRUE the mask is stored into revents and the fd
   dispatcher is run immediately, synthesizing the events without a
   select round.  NOTE(review): the `task->events = mask' line and the
   `task' declaration are missing from this view; visible code kept
   byte-identical. */
802 void silc_schedule_set_listen_fd(SilcSchedule schedule, SilcUInt32 fd,
803 SilcTaskEvent mask, SilcBool send_events)
807 if (silc_unlikely(!schedule->valid))
810 SILC_SCHEDULE_LOCK(schedule);
812 if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
813 NULL, (void **)&task)) {
815 if (silc_unlikely(send_events)) {
816 task->revents = mask;
817 silc_schedule_dispatch_fd(schedule);
821 SILC_SCHEDULE_UNLOCK(schedule);
824 /* Removes a file descriptor from listen list. */
/* Convenience wrapper: clearing the mask (0) stops listening on `fd'. */
826 void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
828 silc_schedule_set_listen_fd(schedule, fd, 0, FALSE);