5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 1998 - 2006 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
23 /************************** Types and definitions ***************************/
25 /* Platform specific implementation */
/* The actual select/poll style backend; each platform defines its own
   SilcScheduleOps instance in a platform-specific source file. */
26 extern const SilcScheduleOps schedule_ops;
/* Forward declarations of internal helpers defined later in this file. */
28 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task);
29 static void silc_schedule_dispatch_fd(SilcSchedule schedule);
30 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
31 SilcBool dispatch_all);
34 /************************ Static utility functions **************************/
36 /* Fd task hash table destructor */
/* Destructor callback registered with the fd hash table (see
   silc_schedule_init).  NOTE(review): the rest of the parameter list and
   the whole body are missing from this copy of the file — recover from
   the upstream silc_schedule.c before building. */
38 static void silc_schedule_fd_destructor(void *key, void *context,
44 /* Executes file descriptor tasks. Invalid tasks are removed here. */
/* Walks the fd hash table and fires the callback for every task whose
   revents report readiness.  Presumably called with the scheduler lock
   held (the lock is dropped around each callback below) — TODO confirm.
   NOTE(review): this copy has extraction gaps (missing braces and
   statements, e.g. the declarations of `t`, `task`, `fd`). */
46 static void silc_schedule_dispatch_fd(SilcSchedule schedule)
48 SilcHashTableList htl;
53 silc_hash_table_list(schedule->fd_queue, &htl);
54 while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
/* Tasks invalidated elsewhere are physically removed here. */
57 if (silc_unlikely(!t->valid)) {
58 silc_schedule_task_remove(schedule, t);
/* Nothing to do unless events were both requested and reported. */
61 if (!task->revents || !task->events)
64 /* Is the task ready for reading */
65 if (task->revents & SILC_TASK_READ) {
/* Lock is released so the callback may call back into the scheduler. */
66 SILC_SCHEDULE_UNLOCK(schedule);
67 t->callback(schedule, schedule->app_context, SILC_TASK_READ,
68 task->fd, t->context);
69 SILC_SCHEDULE_LOCK(schedule);
72 /* Is the task ready for writing */
/* Re-check t->valid: the READ callback may have invalidated the task. */
73 if (t->valid && task->revents & SILC_TASK_WRITE) {
74 SILC_SCHEDULE_UNLOCK(schedule);
75 t->callback(schedule, schedule->app_context, SILC_TASK_WRITE,
76 task->fd, t->context);
77 SILC_SCHEDULE_LOCK(schedule);
80 /* Remove if task was invalidated in the task callback */
81 if (silc_unlikely(!t->valid))
82 silc_schedule_task_remove(schedule, t);
84 silc_hash_table_list_reset(&htl);
87 /* Executes all tasks whose timeout has expired. The task is removed from
88 the task queue after the callback function has returned. Also, invalid
89 tasks are removed here. */
/* `dispatch_all` forces every queued timeout to run regardless of its
   deadline (used from silc_schedule_uninit).  Presumably called with the
   scheduler lock held — TODO confirm.  NOTE(review): this copy has
   extraction gaps (missing braces/declarations, e.g. `t`, `count`). */
91 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
92 SilcBool dispatch_all)
96 struct timeval curtime;
99 SILC_LOG_DEBUG(("Running timeout tasks"));
101 silc_gettimeofday(&curtime);
103 /* First task in the task queue has always the earliest timeout. */
104 silc_list_start(schedule->timeout_queue);
105 task = silc_list_get(schedule->timeout_queue);
109 /* Remove invalid task */
110 if (silc_unlikely(!t->valid)) {
111 silc_schedule_task_remove(schedule, t);
/* Lock dropped while the callback runs so it may use the scheduler API. */
115 SILC_SCHEDULE_UNLOCK(schedule);
117 /* Execute the task if the timeout has expired */
118 if (!silc_compare_timeval(&task->timeout, &curtime) && !dispatch_all) {
119 SILC_SCHEDULE_LOCK(schedule);
124 t->callback(schedule, schedule->app_context, SILC_TASK_EXPIRE, 0,
127 SILC_SCHEDULE_LOCK(schedule);
129 /* Remove the expired task */
130 silc_schedule_task_remove(schedule, t);
/* Cap work per dispatch so one pass cannot starve fd processing. */
132 /* Balance when we have lots of small timeouts */
133 if (silc_unlikely((++count) > 40))
135 } while (silc_likely((task = silc_list_get(schedule->timeout_queue))));
138 /* Calculates next timeout. This is the timeout value when at earliest some
139 of the timeout tasks expire. If this is in the past, they will be
/* ...dispatched immediately (see the dispatch call below).  The result is
   stored into schedule->timeout / schedule->has_timeout for the backend
   select().  NOTE(review): this copy has extraction gaps (missing braces
   and statements, e.g. the `do {` opener and `t` declaration). */
142 static void silc_schedule_select_timeout(SilcSchedule schedule)
145 SilcTaskTimeout task;
146 struct timeval curtime;
147 SilcBool dispatch = TRUE;
149 /* Get the current time */
150 silc_gettimeofday(&curtime);
151 schedule->has_timeout = FALSE;
153 /* First task in the task queue has always the earliest timeout. */
154 silc_list_start(schedule->timeout_queue);
155 task = silc_list_get(schedule->timeout_queue);
159 /* Remove invalid task */
160 if (silc_unlikely(!t->valid)) {
161 silc_schedule_task_remove(schedule, t);
165 /* If the timeout is in past, we will run the task and all other
166 timeout tasks from the past. */
167 if (silc_compare_timeval(&task->timeout, &curtime) && dispatch) {
168 silc_schedule_dispatch_timeout(schedule, FALSE);
/* The dispatch may have stopped the scheduler; bail out if so. */
169 if (silc_unlikely(!schedule->valid))
172 /* Start selecting new timeout again after dispatch */
173 silc_list_start(schedule->timeout_queue);
/* Remaining delta = task deadline minus current time, normalized below. */
178 /* Calculate the next timeout */
179 curtime.tv_sec = task->timeout.tv_sec - curtime.tv_sec;
180 curtime.tv_usec = task->timeout.tv_usec - curtime.tv_usec;
181 if (curtime.tv_sec < 0)
184 /* We wouldn't want to go under zero, check for it. */
185 if (curtime.tv_usec < 0) {
187 if (curtime.tv_sec < 0)
189 curtime.tv_usec += 1000000L;
192 } while ((task = silc_list_get(schedule->timeout_queue)));
194 /* Save the timeout */
196 schedule->timeout = curtime;
197 schedule->has_timeout = TRUE;
198 SILC_LOG_DEBUG(("timeout: sec=%d, usec=%d", schedule->timeout.tv_sec,
199 schedule->timeout.tv_usec));
203 /* Removes task from the scheduler. This must be called with scheduler
/* ...lock held (per the sentence above; callers in this file hold it).
   SILC_ALL_TASKS purges both queues; a timeout task is recycled onto the
   freelist instead of being freed.  NOTE(review): this copy has
   extraction gaps (missing braces, `fd`/`ftask` declarations, `return`s). */
206 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task)
210 if (silc_unlikely(task == SILC_ALL_TASKS)) {
212 SilcHashTableList htl;
215 /* Delete from fd queue */
216 silc_hash_table_list(schedule->fd_queue, &htl);
217 while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task))
218 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(fd));
219 silc_hash_table_list_reset(&htl);
221 /* Delete from timeout queue */
222 silc_list_start(schedule->timeout_queue);
223 while ((task = silc_list_get(schedule->timeout_queue))) {
224 silc_list_del(schedule->timeout_queue, task);
/* type == 1 marks a timeout task (set in silc_schedule_task_add). */
231 if (silc_likely(task->type == 1)) {
232 /* Delete from timeout queue */
233 silc_list_del(schedule->timeout_queue, task);
/* Recycled rather than freed; the GC task trims this list later. */
235 /* Put to free list */
236 silc_list_add(schedule->free_tasks, task);
238 /* Delete from fd queue */
239 ftask = (SilcTaskFd)task;
240 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(ftask->fd));
244 /* Timeout freelist garbage collection */
/* Periodic task that frees surplus recycled timeout tasks.  It keeps at
   least 10 entries and never trims below the number of queued timeouts,
   then re-registers itself (the re-add calls below).  NOTE(review): this
   copy has extraction gaps (missing braces, `c`/`t` declarations, the
   timeout arguments of the re-add calls, and the free of `t`). */
246 SILC_TASK_CALLBACK(silc_schedule_timeout_gc)
251 if (!schedule->valid)
254 SILC_LOG_DEBUG(("Timeout freelist garbage collection"));
256 SILC_SCHEDULE_LOCK(schedule);
/* Small freelist: nothing to trim, just reschedule ourselves. */
258 if (silc_list_count(schedule->free_tasks) <= 10) {
259 SILC_SCHEDULE_UNLOCK(schedule);
260 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* More live timeouts than free entries: freelist is not oversized. */
264 if (silc_list_count(schedule->timeout_queue) >
265 silc_list_count(schedule->free_tasks)) {
266 SILC_SCHEDULE_UNLOCK(schedule);
267 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* Free half the list, clamped so the remainder covers the queue and
   stays at or above the 10-entry floor. */
272 c = silc_list_count(schedule->free_tasks) / 2;
273 if (c > silc_list_count(schedule->timeout_queue))
274 c = (silc_list_count(schedule->free_tasks) -
275 silc_list_count(schedule->timeout_queue));
276 if (silc_list_count(schedule->free_tasks) - c < 10)
277 c -= (10 - (silc_list_count(schedule->free_tasks) - c));
279 SILC_LOG_DEBUG(("Freeing %d unused tasks, leaving %d", c,
280 silc_list_count(schedule->free_tasks) - c));
282 silc_list_start(schedule->free_tasks);
283 while ((t = silc_list_get(schedule->free_tasks)) && c-- > 0) {
284 silc_list_del(schedule->free_tasks, t);
287 silc_list_start(schedule->free_tasks);
289 SILC_SCHEDULE_UNLOCK(schedule);
291 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
295 #ifdef SILC_DIST_INPLACE
296 /* Print schedule statistics to stdout */
/* Debug-build-only helper.  NOTE(review): the sizeof(...) expressions are
   size_t but are printed with %lu/%d — format specifiers should use %zu
   (or cast) to avoid UB on LP64/ILP32 mismatches; verify before release.
   This copy also has extraction gaps (missing braces). */
298 void silc_schedule_stats(SilcSchedule schedule)
301 fprintf(stdout, "Schedule %p statistics:\n\n", schedule);
302 fprintf(stdout, "Num FD tasks : %lu (%lu bytes allocated)\n",
303 silc_hash_table_count(schedule->fd_queue),
304 sizeof(*ftask) * silc_hash_table_count(schedule->fd_queue));
305 fprintf(stdout, "Num Timeout tasks : %d (%d bytes allocated)\n",
306 silc_list_count(schedule->timeout_queue),
307 sizeof(struct SilcTaskTimeoutStruct) *
308 silc_list_count(schedule->timeout_queue));
309 fprintf(stdout, "Num Timeout freelist : %d (%d bytes allocated)\n",
310 silc_list_count(schedule->free_tasks),
311 sizeof(struct SilcTaskTimeoutStruct) *
312 silc_list_count(schedule->free_tasks));
314 #endif /* SILC_DIST_INPLACE */
316 /****************************** Public API **********************************/
318 /* Initializes the scheduler. This returns the scheduler context that
319 is given as argument usually to all silc_schedule_* functions.
320 The `max_tasks' indicates the number of maximum tasks that the
321 scheduler can handle. The `app_context' is application specific
322 context that is delivered to task callbacks. */
/* Returns NULL on allocation failure (presumably — the error paths are
   among the lines missing from this copy; TODO confirm).  NOTE(review):
   extraction gaps here include the NULL checks and the fd_queue
   assignment line for silc_hash_table_alloc. */
324 SilcSchedule silc_schedule_init(int max_tasks, void *app_context)
326 SilcSchedule schedule;
328 SILC_LOG_DEBUG(("Initializing scheduler"));
330 schedule = silc_calloc(1, sizeof(*schedule));
/* fd -> SilcTaskFd map keyed by the raw descriptor number. */
335 silc_hash_table_alloc(0, silc_hash_uint, NULL, NULL, NULL,
336 silc_schedule_fd_destructor, NULL, TRUE);
337 if (!schedule->fd_queue)
340 silc_list_init(schedule->timeout_queue, struct SilcTaskTimeoutStruct, next);
341 silc_list_init(schedule->free_tasks, struct SilcTaskTimeoutStruct, next);
343 schedule->app_context = app_context;
344 schedule->valid = TRUE;
345 schedule->max_tasks = max_tasks;
347 /* Allocate scheduler lock */
348 silc_mutex_alloc(&schedule->lock);
350 /* Initialize the platform specific scheduler. */
351 schedule->internal = schedule_ops.init(schedule, app_context);
/* Register the periodic freelist GC task defined above. */
353 /* Timeout freelist garbage collection */
354 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
360 /* Uninitializes the schedule. This is called when the program is ready
361 to end. This removes all tasks and task queues. Returns FALSE if the
362 scheduler could not be uninitialized. This happens when the scheduler
363 is still valid and silc_schedule_stop has not been called. */
/* NOTE(review): this copy has extraction gaps (missing braces, the `task`
   declaration, the early `return FALSE`, the silc_free of freelist
   entries, and the final `return TRUE`). */
365 SilcBool silc_schedule_uninit(SilcSchedule schedule)
369 SILC_LOG_DEBUG(("Uninitializing scheduler"));
/* Refuse to tear down a still-running scheduler. */
371 if (schedule->valid == TRUE)
374 /* Dispatch all timeouts before going away */
375 SILC_SCHEDULE_LOCK(schedule);
376 silc_schedule_dispatch_timeout(schedule, TRUE);
377 SILC_SCHEDULE_UNLOCK(schedule);
379 /* Deliver signals before going away */
380 if (schedule->signal_tasks) {
381 schedule_ops.signals_call(schedule, schedule->internal);
382 schedule->signal_tasks = FALSE;
/* First invalidate everything, then physically remove it. */
385 /* Unregister all tasks */
386 silc_schedule_task_del(schedule, SILC_ALL_TASKS);
387 silc_schedule_task_remove(schedule, SILC_ALL_TASKS);
389 /* Delete timeout task freelist */
390 silc_list_start(schedule->free_tasks);
391 while ((task = silc_list_get(schedule->free_tasks)))
394 /* Unregister all task queues */
395 silc_hash_table_free(schedule->fd_queue);
397 /* Uninit the platform specific scheduler. */
398 schedule_ops.uninit(schedule, schedule->internal);
400 silc_mutex_free(schedule->lock);
406 /* Stops the schedule even if it is not supposed to be stopped yet.
407 After calling this, one should call silc_schedule_uninit (after the
408 silc_schedule has returned). */
410 void silc_schedule_stop(SilcSchedule schedule)
412 SILC_LOG_DEBUG(("Stopping scheduler"));
413 SILC_SCHEDULE_LOCK(schedule);
414 schedule->valid = FALSE;
415 SILC_SCHEDULE_UNLOCK(schedule);
418 /* Runs the scheduler once and then returns. Must be called locked. */
/* Core loop: deliver pending signals, compute the next timeout, block in
   the backend select, then dispatch timeout and/or fd tasks.  Loops
   forever when timeout_usecs == -1 (see the while condition at the end).
   NOTE(review): this copy has extraction gaps (missing `do {`, braces,
   `ret` declaration, `return` statements, and timeout.tv_sec setup). */
420 static SilcBool silc_schedule_iterate(SilcSchedule schedule, int timeout_usecs)
422 struct timeval timeout;
426 SILC_LOG_DEBUG(("In scheduler loop"));
428 /* Deliver signals if any has been set to be called */
429 if (silc_unlikely(schedule->signal_tasks)) {
430 SILC_SCHEDULE_UNLOCK(schedule);
431 schedule_ops.signals_call(schedule, schedule->internal);
432 schedule->signal_tasks = FALSE;
433 SILC_SCHEDULE_LOCK(schedule);
436 /* Check if scheduler is valid */
437 if (silc_unlikely(schedule->valid == FALSE)) {
438 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
442 /* Calculate next timeout for silc_select(). This is the timeout value
443 when at earliest some of the timeout tasks expire. This may dispatch
444 already expired timeouts. */
445 silc_schedule_select_timeout(schedule);
/* Re-check: the dispatch above may have stopped the scheduler. */
447 /* Check if scheduler is valid */
448 if (silc_unlikely(schedule->valid == FALSE)) {
449 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
/* Caller-supplied timeout overrides the computed one. */
453 if (timeout_usecs >= 0) {
455 timeout.tv_usec = timeout_usecs;
456 schedule->timeout = timeout;
457 schedule->has_timeout = TRUE;
460 /* This is the main silc_select(). The program blocks here until some
461 of the selected file descriptors change status or the selected
463 SILC_LOG_DEBUG(("Select"));
464 ret = schedule_ops.select(schedule, schedule->internal);
/* ret == 0: timeout fired; ret > 0: fds ready; otherwise an error. */
466 if (silc_likely(ret == 0)) {
468 SILC_LOG_DEBUG(("Running timeout tasks"));
469 if (silc_likely(silc_list_count(schedule->timeout_queue)))
470 silc_schedule_dispatch_timeout(schedule, FALSE);
473 } else if (silc_likely(ret > 0)) {
474 /* There is some data available now */
475 SILC_LOG_DEBUG(("Running fd tasks"));
476 silc_schedule_dispatch_fd(schedule);
/* EINTR is benign: just iterate again. */
481 if (silc_likely(errno == EINTR))
483 SILC_LOG_ERROR(("Error in select()/poll(): %s", strerror(errno)));
486 } while (timeout_usecs == -1);
491 /* Runs the scheduler once and then returns. */
493 SilcBool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
496 SILC_SCHEDULE_LOCK(schedule);
497 ret = silc_schedule_iterate(schedule, timeout_usecs);
498 SILC_SCHEDULE_UNLOCK(schedule);
502 /* Runs the scheduler and blocks here. When this returns the scheduler
505 void silc_schedule(SilcSchedule schedule)
507 SILC_LOG_DEBUG(("Running scheduler"));
509 /* Start the scheduler loop */
510 SILC_SCHEDULE_LOCK(schedule);
511 silc_schedule_iterate(schedule, -1);
512 SILC_SCHEDULE_UNLOCK(schedule);
515 /* Wakes up the scheduler. This is used only in multi-threaded
516 environments where threads may add new tasks or remove old tasks
517 from task queues. This is called to wake up the scheduler in the
518 main thread so that it detects the changes in the task queues.
519 If threads support is not compiled in this function has no effect.
520 Implementation of this function is platform specific. */
522 void silc_schedule_wakeup(SilcSchedule schedule)
525 SILC_LOG_DEBUG(("Wakeup scheduler"));
526 SILC_SCHEDULE_LOCK(schedule);
527 schedule_ops.wakeup(schedule, schedule->internal);
528 SILC_SCHEDULE_UNLOCK(schedule);
532 /* Returns the application specific context that was saved into the
533 scheduler in silc_schedule_init function. The context is also
534 returned to application in task callback functions, but this function
535 may be used to get it as well if needed. */
537 void *silc_schedule_get_context(SilcSchedule schedule)
539 return schedule->app_context;
542 /* Add new task to the scheduler */
/* Adds a timeout, fd, or signal task depending on `type'.  Timeout tasks
   are taken from the freelist when possible and kept sorted by deadline;
   fd tasks live in the fd hash table keyed by the descriptor.  Returns
   the new task or NULL (the failure `goto`/`return` lines are among this
   copy's extraction gaps, as are the `type' parameter line, `list'
   declaration, and several braces — TODO recover from upstream). */
544 SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
545 SilcTaskCallback callback, void *context,
546 long seconds, long useconds,
549 SilcTask task = NULL;
551 if (silc_unlikely(!schedule->valid))
554 SILC_SCHEDULE_LOCK(schedule);
556 if (silc_likely(type == SILC_TASK_TIMEOUT)) {
557 SilcTaskTimeout tmp, prev, ttask;
/* Reuse a recycled task if the freelist has one; allocate otherwise. */
560 silc_list_start(schedule->free_tasks);
561 ttask = silc_list_get(schedule->free_tasks);
562 if (silc_unlikely(!ttask)) {
563 ttask = silc_calloc(1, sizeof(*ttask));
564 if (silc_unlikely(!ttask))
567 silc_list_del(schedule->free_tasks, ttask);
/* type 1 == timeout task (checked in silc_schedule_task_remove). */
569 ttask->header.type = 1;
570 ttask->header.callback = callback;
571 ttask->header.context = context;
572 ttask->header.valid = TRUE;
/* Absolute deadline = now + seconds/useconds, usec normalized below. */
575 silc_gettimeofday(&ttask->timeout);
576 if ((seconds + useconds) > 0) {
577 ttask->timeout.tv_sec += seconds + (useconds / 1000000L);
578 ttask->timeout.tv_usec += (useconds % 1000000L);
579 if (ttask->timeout.tv_usec >= 1000000L) {
580 ttask->timeout.tv_sec += 1;
581 ttask->timeout.tv_usec -= 1000000L;
585 SILC_LOG_DEBUG(("New timeout task %p: sec=%d, usec=%d", ttask,
588 /* Add task to correct spot so that the first task in the list has
589 the earliest timeout. */
590 list = schedule->timeout_queue;
591 silc_list_start(list);
593 while ((tmp = silc_list_get(list)) != SILC_LIST_END) {
594 /* If we have shorter timeout, we have found our spot */
595 if (silc_compare_timeval(&ttask->timeout, &tmp->timeout)) {
596 silc_list_insert(schedule->timeout_queue, prev, ttask);
/* No earlier slot found: append at the tail. */
602 silc_list_add(schedule->timeout_queue, ttask);
604 task = (SilcTask)ttask;
606 } else if (silc_likely(type == SILC_TASK_FD)) {
/* fd is unique in the table, so an existing entry is returned as-is. */
607 /* Check if fd is already added */
608 if (silc_unlikely(silc_hash_table_find(schedule->fd_queue,
610 NULL, (void **)&task)))
613 /* Check max tasks */
614 if (silc_unlikely(schedule->max_tasks > 0 &&
615 silc_hash_table_count(schedule->fd_queue) >=
616 schedule->max_tasks)) {
617 SILC_LOG_WARNING(("Scheduler task limit reached: cannot add new task"));
621 SilcTaskFd ftask = silc_calloc(1, sizeof(*ftask));
622 if (silc_unlikely(!ftask))
625 SILC_LOG_DEBUG(("New fd task %p fd=%d", ftask, fd));
/* type 0 == fd task; new fd tasks listen for reading by default. */
627 ftask->header.type = 0;
628 ftask->header.callback = callback;
629 ftask->header.context = context;
630 ftask->header.valid = TRUE;
631 ftask->events = SILC_TASK_READ;
635 silc_hash_table_add(schedule->fd_queue, SILC_32_TO_PTR(fd), ftask);
637 task = (SilcTask)ftask;
639 } else if (silc_unlikely(type == SILC_TASK_SIGNAL)) {
/* Signal registration is delegated to the platform backend, unlocked. */
640 SILC_SCHEDULE_UNLOCK(schedule);
641 schedule_ops.signal_register(schedule, schedule->internal, fd,
647 SILC_SCHEDULE_UNLOCK(schedule);
651 /* Invalidates task */
653 void silc_schedule_task_del(SilcSchedule schedule, SilcTask task)
655 if (silc_unlikely(task == SILC_ALL_TASKS)) {
656 SilcHashTableList htl;
658 SILC_LOG_DEBUG(("Unregister all tasks"));
660 SILC_SCHEDULE_LOCK(schedule);
662 /* Delete from fd queue */
663 silc_hash_table_list(schedule->fd_queue, &htl);
664 while (silc_hash_table_get(&htl, NULL, (void **)&task))
666 silc_hash_table_list_reset(&htl);
668 /* Delete from timeout queue */
669 silc_list_start(schedule->timeout_queue);
670 while ((task = (SilcTask)silc_list_get(schedule->timeout_queue))
674 SILC_SCHEDULE_UNLOCK(schedule);
678 SILC_LOG_DEBUG(("Unregistering task %p", task));
679 SILC_SCHEDULE_LOCK(schedule);
681 SILC_SCHEDULE_UNLOCK(schedule);
684 /* Invalidate task by fd */
686 void silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
688 SilcTask task = NULL;
690 SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
692 SILC_SCHEDULE_LOCK(schedule);
694 /* fd is unique, so there is only one task with this fd in the table */
695 if (silc_likely(silc_hash_table_find(schedule->fd_queue,
696 SILC_32_TO_PTR(fd), NULL,
700 SILC_SCHEDULE_UNLOCK(schedule);
702 /* If it is signal, remove it */
703 if (silc_unlikely(!task))
704 schedule_ops.signal_unregister(schedule, schedule->internal, fd);
707 /* Invalidate task by task callback. */
709 void silc_schedule_task_del_by_callback(SilcSchedule schedule,
710 SilcTaskCallback callback)
713 SilcHashTableList htl;
716 SILC_LOG_DEBUG(("Unregister task by callback"));
718 SILC_SCHEDULE_LOCK(schedule);
720 /* Delete from fd queue */
721 silc_hash_table_list(schedule->fd_queue, &htl);
722 while (silc_hash_table_get(&htl, NULL, (void **)&task)) {
723 if (task->callback == callback)
726 silc_hash_table_list_reset(&htl);
728 /* Delete from timeout queue */
729 list = schedule->timeout_queue;
730 silc_list_start(list);
731 while ((task = (SilcTask)silc_list_get(list))) {
732 if (task->callback == callback)
736 SILC_SCHEDULE_UNLOCK(schedule);
739 /* Invalidate task by context. */
741 void silc_schedule_task_del_by_context(SilcSchedule schedule, void *context)
744 SilcHashTableList htl;
747 SILC_LOG_DEBUG(("Unregister task by context"));
749 SILC_SCHEDULE_LOCK(schedule);
751 /* Delete from fd queue */
752 silc_hash_table_list(schedule->fd_queue, &htl);
753 while (silc_hash_table_get(&htl, NULL, (void **)&task)) {
754 if (task->context == context)
757 silc_hash_table_list_reset(&htl);
759 /* Delete from timeout queue */
760 list = schedule->timeout_queue;
761 silc_list_start(list);
762 while ((task = (SilcTask)silc_list_get(list))) {
763 if (task->context == context)
767 SILC_SCHEDULE_UNLOCK(schedule);
770 /* Invalidate task by all */
772 void silc_schedule_task_del_by_all(SilcSchedule schedule, int fd,
773 SilcTaskCallback callback, void *context)
778 SILC_LOG_DEBUG(("Unregister task by fd, callback and context"));
780 /* For fd task, callback and context is irrelevant as fd is unique */
782 silc_schedule_task_del_by_fd(schedule, fd);
784 SILC_SCHEDULE_LOCK(schedule);
786 /* Delete from timeout queue */
787 list = schedule->timeout_queue;
788 silc_list_start(list);
789 while ((task = (SilcTask)silc_list_get(list))) {
790 if (task->callback == callback && task->context == context)
794 SILC_SCHEDULE_UNLOCK(schedule);
797 /* Sets a file descriptor to be listened by scheduler. One can call this
798 directly if wanted. This can be called multiple times for one file
799 descriptor to set different iomasks. */
801 void silc_schedule_set_listen_fd(SilcSchedule schedule, SilcUInt32 fd,
802 SilcTaskEvent mask, SilcBool send_events)
806 if (silc_unlikely(!schedule->valid))
809 SILC_SCHEDULE_LOCK(schedule);
811 if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
812 NULL, (void **)&task)) {
814 if (silc_unlikely(send_events)) {
815 task->revents = mask;
816 silc_schedule_dispatch_fd(schedule);
820 SILC_SCHEDULE_UNLOCK(schedule);
823 /* Removes a file descriptor from listen list. */
825 void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
827 silc_schedule_set_listen_fd(schedule, fd, 0, FALSE);