5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 1998 - 2007 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
23 /************************** Types and definitions ***************************/
25 /* Platform specific implementation */
26 extern const SilcScheduleOps schedule_ops;
28 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task);
29 static void silc_schedule_dispatch_fd(SilcSchedule schedule);
30 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
31 SilcBool dispatch_all);
34 /************************ Static utility functions **************************/
36 /* Fd task hash table destructor */
38 static void silc_schedule_fd_destructor(void *key, void *context,
44 /* Executes file descriptor tasks. Invalid tasks are removed here. */

/* NOTE(review): the embedded numbering below (46, 51, 56, 59...) is
   non-contiguous -- the extraction dropped source lines (the opening
   brace, declarations of the locals `task'/`t', closing braces).
   Restore the missing lines from upstream silcschedule.c before
   compiling. */
46 static void silc_schedule_dispatch_fd(SilcSchedule schedule)
51 /* The dispatch list includes only valid tasks, and tasks that have
52 something to dispatch. Dispatching is atomic; no matter if another
53 thread invalidates a task when we unlock, we dispatch to completion. */
/* Callbacks run with the scheduler lock RELEASED (unlock here, re-lock
   after the dispatch loop) so a callback may re-enter the scheduler. */
54 SILC_SCHEDULE_UNLOCK(schedule);
55 silc_list_start(schedule->fd_dispatch);
56 while ((task = silc_list_get(schedule->fd_dispatch))) {
59 /* Is the task ready for reading */
60 if (task->revents & SILC_TASK_READ)
61 t->callback(schedule, schedule->app_context, SILC_TASK_READ,
62 task->fd, t->context);
64 /* Is the task ready for writing */
/* t->valid is re-checked: the READ callback above may have
   invalidated this same task. */
65 if (t->valid && task->revents & SILC_TASK_WRITE)
66 t->callback(schedule, schedule->app_context, SILC_TASK_WRITE,
67 task->fd, t->context);
69 SILC_SCHEDULE_LOCK(schedule);
71 /* Remove invalidated tasks */
72 silc_list_start(schedule->fd_dispatch);
73 while ((task = silc_list_get(schedule->fd_dispatch)))
74 if (silc_unlikely(!task->header.valid))
75 silc_schedule_task_remove(schedule, (SilcTask)task);
78 /* Executes all tasks whose timeout has expired. The task is removed from
79 the task queue after the callback function has returned. Also, invalid
80 tasks are removed here. */

/* NOTE(review): extraction dropped lines here (function brace, the
   `do {' opener, locals `task'/`t'/`count', `break'/`continue'
   statements, trailing callback argument).  The embedded numbering
   jumps 83->87, 97->102, 114->116, etc.  Do not compile as-is. */
82 static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
83 SilcBool dispatch_all)
87 struct timeval curtime;
90 SILC_LOG_DEBUG(("Running timeout tasks"));
92 silc_gettimeofday(&curtime);
94 /* First task in the task queue has always the earliest timeout. */
95 silc_list_start(schedule->timeout_queue);
96 task = silc_list_get(schedule->timeout_queue);
97 if (silc_unlikely(!task))
102 /* Remove invalid task */
103 if (silc_unlikely(!t->valid)) {
104 silc_schedule_task_remove(schedule, t);
108 /* Execute the task if the timeout has expired */
/* With `dispatch_all' set, every queued timeout runs regardless of
   whether its deadline has arrived (used at uninit, see caller). */
109 if (!silc_compare_timeval(&task->timeout, &curtime) && !dispatch_all)
/* Callback is invoked with the scheduler lock released. */
113 SILC_SCHEDULE_UNLOCK(schedule);
114 t->callback(schedule, schedule->app_context, SILC_TASK_EXPIRE, 0,
116 SILC_SCHEDULE_LOCK(schedule);
118 /* Remove the expired task */
119 silc_schedule_task_remove(schedule, t);
121 /* Balance when we have lots of small timeouts */
/* Caps one dispatch pass at ~40 timeouts so a flood of short
   timeouts cannot starve fd tasks. */
122 if (silc_unlikely((++count) > 40))
124 } while (silc_likely((task = silc_list_get(schedule->timeout_queue))));
127 /* Calculates next timeout. This is the timeout value when at earliest some
128 of the timeout tasks expire. If this is in the past, they will be
/* ...dispatched immediately (tail of this comment was dropped by the
   extraction -- embedded numbering jumps 128->131).  NOTE(review):
   several other lines are missing throughout (do-loop opener, `t'
   declaration, `return'/`continue'/`break' lines, brace closures). */
131 static void silc_schedule_select_timeout(SilcSchedule schedule)
134 SilcTaskTimeout task;
135 struct timeval curtime;
136 SilcBool dispatch = TRUE;
138 /* Get the current time */
139 silc_gettimeofday(&curtime);
/* Assume no pending timeout until one is actually computed below. */
140 schedule->has_timeout = FALSE;
142 /* First task in the task queue has always the earliest timeout. */
143 silc_list_start(schedule->timeout_queue);
144 task = silc_list_get(schedule->timeout_queue);
145 if (silc_unlikely(!task))
150 /* Remove invalid task */
151 if (silc_unlikely(!t->valid)) {
152 silc_schedule_task_remove(schedule, t);
156 /* If the timeout is in past, we will run the task and all other
157 timeout tasks from the past. */
158 if (silc_compare_timeval(&task->timeout, &curtime) && dispatch) {
159 silc_schedule_dispatch_timeout(schedule, FALSE);
160 if (silc_unlikely(!schedule->valid))
163 /* Start selecting new timeout again after dispatch */
164 silc_list_start(schedule->timeout_queue);
169 /* Calculate the next timeout */
/* Relative timeout = task deadline minus current time, computed
   field-by-field; negative usec is normalized below. */
170 curtime.tv_sec = task->timeout.tv_sec - curtime.tv_sec;
171 curtime.tv_usec = task->timeout.tv_usec - curtime.tv_usec;
172 if (curtime.tv_sec < 0)
175 /* We wouldn't want to go under zero, check for it. */
176 if (curtime.tv_usec < 0) {
178 if (curtime.tv_sec < 0)
180 curtime.tv_usec += 1000000L;
183 } while ((task = silc_list_get(schedule->timeout_queue)));
185 /* Save the timeout */
187 schedule->timeout = curtime;
188 schedule->has_timeout = TRUE;
189 SILC_LOG_DEBUG(("timeout: sec=%d, usec=%d", schedule->timeout.tv_sec,
190 schedule->timeout.tv_usec));
194 /* Removes task from the scheduler. This must be called with scheduler
/* ...lock held (tail of comment dropped by extraction).
   NOTE(review): embedded numbering jumps (197->201, 215->222...);
   locals (`fd', `ftask'), `return's and brace closures are missing.
   Restore from upstream silcschedule.c before compiling. */
197 static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task)
/* SILC_ALL_TASKS: wipe both the fd hash table and the timeout list. */
201 if (silc_unlikely(task == SILC_ALL_TASKS)) {
203 SilcHashTableList htl;
206 /* Delete from fd queue */
207 silc_hash_table_list(schedule->fd_queue, &htl);
208 while (silc_hash_table_get(&htl, (void *)&fd, (void *)&task))
209 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(fd));
210 silc_hash_table_list_reset(&htl);
212 /* Delete from timeout queue */
213 silc_list_start(schedule->timeout_queue);
214 while ((task = silc_list_get(schedule->timeout_queue))) {
215 silc_list_del(schedule->timeout_queue, task);
/* Single task: type 1 is a timeout task (recycled via free list),
   otherwise it is an fd task keyed by its descriptor. */
222 if (silc_likely(task->type == 1)) {
223 /* Delete from timeout queue */
224 silc_list_del(schedule->timeout_queue, task);
226 /* Put to free list */
/* Timeout tasks are not freed here; they are recycled by the
   freelist GC (silc_schedule_timeout_gc). */
227 silc_list_add(schedule->free_tasks, task);
229 /* Delete from fd queue */
230 ftask = (SilcTaskFd)task;
231 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(ftask->fd));
235 /* Timeout freelist garbage collection */

/* Periodic task that trims the timeout-task freelist when it grows
   larger than needed, then reschedules itself.
   NOTE(review): extraction dropped lines (locals `t'/`c', the timeout
   arguments of the silc_schedule_task_add_timeout calls, `return's,
   silc_free of `t', closing braces) -- numbering jumps 251->255,
   258->263, 275->278.  Do not compile as-is. */
237 SILC_TASK_CALLBACK(silc_schedule_timeout_gc)
242 if (!schedule->valid)
245 SILC_LOG_DEBUG(("Timeout freelist garbage collection"));
247 SILC_SCHEDULE_LOCK(schedule);
/* Keep at least a small reserve (10) of reusable tasks; nothing to
   collect below that, just reschedule. */
249 if (silc_list_count(schedule->free_tasks) <= 10) {
250 SILC_SCHEDULE_UNLOCK(schedule);
251 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* If more timeouts are queued than are sitting free, the freelist is
   not oversized -- skip collection this round. */
255 if (silc_list_count(schedule->timeout_queue) >
256 silc_list_count(schedule->free_tasks)) {
257 SILC_SCHEDULE_UNLOCK(schedule);
258 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
/* Free roughly half the freelist, clamped so that (a) we never free
   below the live timeout-queue demand and (b) at least 10 remain. */
263 c = silc_list_count(schedule->free_tasks) / 2;
264 if (c > silc_list_count(schedule->timeout_queue))
265 c = (silc_list_count(schedule->free_tasks) -
266 silc_list_count(schedule->timeout_queue));
267 if (silc_list_count(schedule->free_tasks) - c < 10)
268 c -= (10 - (silc_list_count(schedule->free_tasks) - c));
270 SILC_LOG_DEBUG(("Freeing %d unused tasks, leaving %d", c,
271 silc_list_count(schedule->free_tasks) - c));
273 silc_list_start(schedule->free_tasks);
274 while ((t = silc_list_get(schedule->free_tasks)) && c-- > 0) {
275 silc_list_del(schedule->free_tasks, t);
278 silc_list_start(schedule->free_tasks);
280 SILC_SCHEDULE_UNLOCK(schedule);
282 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
286 #ifdef SILC_DIST_INPLACE
287 /* Print schedule statistics to stdout */

/* Debug-build-only (SILC_DIST_INPLACE) dump of task counts and their
   approximate memory footprint.
   NOTE(review): extraction dropped the opening brace and the `ftask'
   declaration (numbering jumps 289->292); restore before compiling. */
289 void silc_schedule_stats(SilcSchedule schedule)
292 fprintf(stdout, "Schedule %p statistics:\n\n", schedule);
293 fprintf(stdout, "Num FD tasks : %lu (%lu bytes allocated)\n",
294 silc_hash_table_count(schedule->fd_queue),
295 sizeof(*ftask) * silc_hash_table_count(schedule->fd_queue));
296 fprintf(stdout, "Num Timeout tasks : %d (%d bytes allocated)\n",
297 silc_list_count(schedule->timeout_queue),
298 sizeof(struct SilcTaskTimeoutStruct) *
299 silc_list_count(schedule->timeout_queue));
300 fprintf(stdout, "Num Timeout freelist : %d (%d bytes allocated)\n",
301 silc_list_count(schedule->free_tasks),
302 sizeof(struct SilcTaskTimeoutStruct) *
303 silc_list_count(schedule->free_tasks));
305 #endif /* SILC_DIST_INPLACE */
307 /****************************** Public API **********************************/
309 /* Initializes the scheduler. This returns the scheduler context that
310 is given as arugment usually to all silc_schedule_* functions.
311 The `max_tasks' indicates the number of maximum tasks that the
312 scheduler can handle. The `app_context' is application specific
313 context that is delivered to task callbacks. */

/* NOTE(review): extraction dropped lines (opening brace, the NULL
   check after silc_calloc, `schedule->fd_queue =' assignment before
   the silc_hash_table_alloc call, error-path frees, GC timeout args,
   final `return schedule;') -- numbering jumps 321->326, 328->331,
   345->351.  Restore from upstream before compiling. */
315 SilcSchedule silc_schedule_init(int max_tasks, void *app_context)
317 SilcSchedule schedule;
319 SILC_LOG_DEBUG(("Initializing scheduler"));
321 schedule = silc_calloc(1, sizeof(*schedule));
/* fd tasks keyed by descriptor; destructor frees the task entry. */
326 silc_hash_table_alloc(0, silc_hash_uint, NULL, NULL, NULL,
327 silc_schedule_fd_destructor, NULL, TRUE);
328 if (!schedule->fd_queue)
331 silc_list_init(schedule->timeout_queue, struct SilcTaskStruct, next);
332 silc_list_init(schedule->free_tasks, struct SilcTaskStruct, next);
334 schedule->app_context = app_context;
335 schedule->valid = TRUE;
336 schedule->max_tasks = max_tasks;
338 /* Allocate scheduler lock */
339 silc_mutex_alloc(&schedule->lock);
341 /* Initialize the platform specific scheduler. */
/* schedule_ops is the platform backend (select/poll/epoll etc.). */
342 schedule->internal = schedule_ops.init(schedule, app_context);
344 /* Timeout freelist garbage collection */
345 silc_schedule_task_add_timeout(schedule, silc_schedule_timeout_gc,
351 /* Uninitializes the schedule. This is called when the program is ready
352 to end. This removes all tasks and task queues. Returns FALSE if the
353 scheduler could not be uninitialized. This happens when the scheduler
354 is still valid and silc_schedule_stop has not been called. */

/* NOTE(review): extraction dropped lines (opening brace, the `task'
   local, the `return FALSE;' after the validity check at 362, the
   silc_free in the freelist loop, `silc_free(schedule)' and the final
   `return TRUE;') -- numbering jumps 362->365, 382->385.  Restore
   before compiling. */
356 SilcBool silc_schedule_uninit(SilcSchedule schedule)
360 SILC_LOG_DEBUG(("Uninitializing scheduler"));
/* Still valid => silc_schedule_stop was not called; per the contract
   above this presumably returns FALSE (dropped line) -- confirm. */
362 if (schedule->valid == TRUE)
365 /* Dispatch all timeouts before going away */
366 SILC_SCHEDULE_LOCK(schedule);
367 silc_schedule_dispatch_timeout(schedule, TRUE);
368 SILC_SCHEDULE_UNLOCK(schedule);
370 /* Deliver signals before going away */
371 if (schedule->signal_tasks) {
372 schedule_ops.signals_call(schedule, schedule->internal);
373 schedule->signal_tasks = FALSE;
376 /* Unregister all tasks */
/* del invalidates; remove actually detaches and recycles/frees. */
377 silc_schedule_task_del(schedule, SILC_ALL_TASKS);
378 silc_schedule_task_remove(schedule, SILC_ALL_TASKS);
380 /* Delete timeout task freelist */
381 silc_list_start(schedule->free_tasks);
382 while ((task = silc_list_get(schedule->free_tasks)))
385 /* Unregister all task queues */
386 silc_hash_table_free(schedule->fd_queue);
388 /* Uninit the platform specific scheduler. */
389 schedule_ops.uninit(schedule, schedule->internal);
391 silc_mutex_free(schedule->lock);
397 /* Stops the schedule even if it is not supposed to be stopped yet.
398 After calling this, one should call silc_schedule_uninit (after the
399 silc_schedule has returned). */

/* Clears the `valid' flag under the lock; the scheduler loop checks
   this flag and exits.  NOTE(review): opening/closing braces were
   dropped by the extraction (numbering 401->403). */
401 void silc_schedule_stop(SilcSchedule schedule)
403 SILC_LOG_DEBUG(("Stopping scheduler"));
404 SILC_SCHEDULE_LOCK(schedule);
405 schedule->valid = FALSE;
406 SILC_SCHEDULE_UNLOCK(schedule);
409 /* Runs the scheduler once and then returns. Must be called locked. */

/* Core scheduler loop: deliver pending signals, compute the next
   timeout, block in the platform backend, then dispatch expired
   timeouts / ready fds.  Loops forever when timeout_usecs == -1,
   otherwise runs a single iteration.
   NOTE(review): extraction dropped lines (opening brace, `ret'
   declaration, `do {' opener, `return FALSE/TRUE' statements,
   timeout.tv_sec assignment before 446, `continue', closing braces)
   -- numbering jumps 413->417, 444->446, 467->472.  Restore before
   compiling. */
411 static SilcBool silc_schedule_iterate(SilcSchedule schedule, int timeout_usecs)
413 struct timeval timeout;
417 SILC_LOG_DEBUG(("In scheduler loop"));
419 /* Deliver signals if any has been set to be called */
420 if (silc_unlikely(schedule->signal_tasks)) {
421 SILC_SCHEDULE_UNLOCK(schedule);
422 schedule_ops.signals_call(schedule, schedule->internal);
423 schedule->signal_tasks = FALSE;
424 SILC_SCHEDULE_LOCK(schedule);
427 /* Check if scheduler is valid */
428 if (silc_unlikely(schedule->valid == FALSE)) {
429 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
433 /* Calculate next timeout for silc_select(). This is the timeout value
434 when at earliest some of the timeout tasks expire. This may dispatch
435 already expired timeouts. */
436 silc_schedule_select_timeout(schedule);
438 /* Check if scheduler is valid */
/* Re-checked because select_timeout may have dispatched callbacks
   that stopped the scheduler. */
439 if (silc_unlikely(schedule->valid == FALSE)) {
440 SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
444 if (timeout_usecs >= 0) {
/* Caller-supplied timeout overrides the computed one. */
446 timeout.tv_usec = timeout_usecs;
447 schedule->timeout = timeout;
448 schedule->has_timeout = TRUE;
451 /* This is the main silc_select(). The program blocks here until some
452 of the selected file descriptors change status or the selected
454 SILC_LOG_DEBUG(("Select"));
455 ret = schedule_ops.schedule(schedule, schedule->internal);
/* ret == 0: timeout expired; ret > 0: fds ready; otherwise error. */
457 if (silc_likely(ret == 0)) {
459 SILC_LOG_DEBUG(("Running timeout tasks"));
460 if (silc_likely(silc_list_count(schedule->timeout_queue)))
461 silc_schedule_dispatch_timeout(schedule, FALSE);
464 } else if (silc_likely(ret > 0)) {
465 /* There is some data available now */
466 SILC_LOG_DEBUG(("Running fd tasks"));
467 silc_schedule_dispatch_fd(schedule);
/* EINTR is benign (signal interrupted the wait) -- retry. */
472 if (silc_likely(errno == EINTR))
474 SILC_LOG_ERROR(("Error in select()/poll(): %s", strerror(errno)));
477 } while (timeout_usecs == -1);
482 /* Runs the scheduler once and then returns. */

/* Public single-iteration entry point; wraps silc_schedule_iterate
   with the scheduler lock held, as that function requires.
   NOTE(review): braces, `ret' declaration and `return ret;' were
   dropped by the extraction (numbering 484->487, 489->493). */
484 SilcBool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
487 SILC_SCHEDULE_LOCK(schedule);
488 ret = silc_schedule_iterate(schedule, timeout_usecs);
489 SILC_SCHEDULE_UNLOCK(schedule);
493 /* Runs the scheduler and blocks here. When this returns the scheduler
/* ...has been stopped (tail of comment dropped by extraction;
   numbering jumps 493->496).  Blocking variant: timeout -1 makes
   silc_schedule_iterate loop until silc_schedule_stop is called. */
496 void silc_schedule(SilcSchedule schedule)
498 SILC_LOG_DEBUG(("Running scheduler"));
500 /* Start the scheduler loop */
501 SILC_SCHEDULE_LOCK(schedule);
502 silc_schedule_iterate(schedule, -1);
503 SILC_SCHEDULE_UNLOCK(schedule);
506 /* Wakes up the scheduler. This is used only in multi-threaded
507 environments where threads may add new tasks or remove old tasks
508 from task queues. This is called to wake up the scheduler in the
509 main thread so that it detects the changes in the task queues.
510 If threads support is not compiled in this function has no effect.
511 Implementation of this function is platform specific. */

/* Delegates to the platform backend's wakeup op under the lock.
   NOTE(review): braces were dropped by the extraction (numbering
   513->516). */
513 void silc_schedule_wakeup(SilcSchedule schedule)
516 SILC_LOG_DEBUG(("Wakeup scheduler"));
517 SILC_SCHEDULE_LOCK(schedule);
518 schedule_ops.wakeup(schedule, schedule->internal);
519 SILC_SCHEDULE_UNLOCK(schedule);
523 /* Returns the application specific context that was saved into the
524 scheduler in silc_schedule_init function. The context is also
525 returned to application in task callback functions, but this function
526 may be used to get it as well if needed. */

/* Trivial accessor; no locking needed for a read of this pointer set
   once at init.  NOTE(review): braces dropped by extraction. */
528 void *silc_schedule_get_context(SilcSchedule schedule)
530 return schedule->app_context;
533 /* Add new task to the scheduler */

/* Dispatches on `type': SILC_TASK_TIMEOUT (recycled from the
   freelist, inserted into the deadline-sorted timeout_queue),
   SILC_TASK_FD (hashed by unique fd, scheduled with the platform
   backend) or SILC_TASK_SIGNAL (delegated to signal_register).
   Returns the new task, or NULL-ish on failure (exact error paths
   were dropped).
   NOTE(review): extraction dropped many lines -- the `type'
   parameter and opening brace (537->540), `return NULL', the `list'
   and `prev' declarations, `prev = tmp;' in the insertion loop,
   `break', error gotos, the SILC_TASK_SIGNAL arguments and the final
   `return task;'.  Numbering jumps 587->593, 652->658.  Restore from
   upstream before compiling. */
535 SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
536 SilcTaskCallback callback, void *context,
537 long seconds, long useconds,
540 SilcTask task = NULL;
542 if (silc_unlikely(!schedule->valid))
545 SILC_SCHEDULE_LOCK(schedule);
547 if (silc_likely(type == SILC_TASK_TIMEOUT)) {
548 SilcTaskTimeout tmp, prev, ttask;
/* Reuse a recycled timeout task if available; allocate otherwise. */
551 silc_list_start(schedule->free_tasks);
552 ttask = silc_list_get(schedule->free_tasks);
553 if (silc_unlikely(!ttask)) {
554 ttask = silc_calloc(1, sizeof(*ttask));
555 if (silc_unlikely(!ttask))
558 silc_list_del(schedule->free_tasks, ttask);
/* type 1 marks a timeout task (cf. silc_schedule_task_remove). */
560 ttask->header.type = 1;
561 ttask->header.callback = callback;
562 ttask->header.context = context;
563 ttask->header.valid = TRUE;
/* Absolute deadline = now + seconds/useconds, usec normalized. */
566 silc_gettimeofday(&ttask->timeout);
567 if ((seconds + useconds) > 0) {
568 ttask->timeout.tv_sec += seconds + (useconds / 1000000L);
569 ttask->timeout.tv_usec += (useconds % 1000000L);
570 if (ttask->timeout.tv_usec >= 1000000L) {
571 ttask->timeout.tv_sec += 1;
572 ttask->timeout.tv_usec -= 1000000L;
576 SILC_LOG_DEBUG(("New timeout task %p: sec=%d, usec=%d", ttask,
579 /* Add task to correct spot so that the first task in the list has
580 the earliest timeout. */
581 list = schedule->timeout_queue;
582 silc_list_start(list);
584 while ((tmp = silc_list_get(list)) != SILC_LIST_END) {
585 /* If we have shorter timeout, we have found our spot */
586 if (silc_compare_timeval(&ttask->timeout, &tmp->timeout)) {
587 silc_list_insert(schedule->timeout_queue, prev, ttask);
/* No earlier spot found: append at the tail. */
593 silc_list_add(schedule->timeout_queue, ttask);
595 task = (SilcTask)ttask;
597 } else if (silc_likely(type == SILC_TASK_FD)) {
600 /* Check if fd is already added */
601 if (silc_unlikely(silc_hash_table_find(schedule->fd_queue,
603 NULL, (void *)&task))) {
607 /* Remove invalid task. We must have unique fd key to hash table. */
608 silc_schedule_task_remove(schedule, task);
611 /* Check max tasks */
612 if (silc_unlikely(schedule->max_tasks > 0 &&
613 silc_hash_table_count(schedule->fd_queue) >=
614 schedule->max_tasks)) {
615 SILC_LOG_WARNING(("Scheduler task limit reached: cannot add new task"));
620 ftask = silc_calloc(1, sizeof(*ftask));
621 if (silc_unlikely(!ftask)) {
626 SILC_LOG_DEBUG(("New fd task %p fd=%d", ftask, fd));
628 ftask->header.type = 0;
629 ftask->header.callback = callback;
630 ftask->header.context = context;
631 ftask->header.valid = TRUE;
/* New fd tasks listen for reads only; writes via set_listen_fd. */
632 ftask->events = SILC_TASK_READ;
635 /* Add task and schedule it */
636 if (!silc_hash_table_add(schedule->fd_queue, SILC_32_TO_PTR(fd), ftask)) {
641 if (!schedule_ops.schedule_fd(schedule, schedule->internal,
642 ftask, ftask->events)) {
643 silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(fd));
648 task = (SilcTask)ftask;
650 } else if (silc_unlikely(type == SILC_TASK_SIGNAL)) {
/* Signal registration is platform specific; done unlocked. */
651 SILC_SCHEDULE_UNLOCK(schedule);
652 schedule_ops.signal_register(schedule, schedule->internal, fd,
658 SILC_SCHEDULE_UNLOCK(schedule);
662 /* Invalidates task */

/* Marks a task (or SILC_ALL_TASKS) invalid; actual removal happens
   later in the dispatch paths via silc_schedule_task_remove.
   NOTE(review): extraction dropped lines (opening brace, the
   `task->valid = FALSE' statements in the loops and single-task
   path, `return TRUE;') -- numbering jumps 675->677, 681->685,
   690->692.  Restore before compiling. */
664 SilcBool silc_schedule_task_del(SilcSchedule schedule, SilcTask task)
666 if (silc_unlikely(task == SILC_ALL_TASKS)) {
667 SilcHashTableList htl;
669 SILC_LOG_DEBUG(("Unregister all tasks"));
671 SILC_SCHEDULE_LOCK(schedule);
673 /* Delete from fd queue */
674 silc_hash_table_list(schedule->fd_queue, &htl);
675 while (silc_hash_table_get(&htl, NULL, (void *)&task))
677 silc_hash_table_list_reset(&htl);
679 /* Delete from timeout queue */
680 silc_list_start(schedule->timeout_queue);
681 while ((task = (SilcTask)silc_list_get(schedule->timeout_queue))
685 SILC_SCHEDULE_UNLOCK(schedule);
689 SILC_LOG_DEBUG(("Unregistering task %p", task));
690 SILC_SCHEDULE_LOCK(schedule);
692 SILC_SCHEDULE_UNLOCK(schedule);
697 /* Invalidate task by fd */

/* Looks up the unique fd task and invalidates it; if no fd task was
   found the fd is assumed to be a registered signal and is
   unregistered via the platform backend.
   NOTE(review): extraction dropped lines (opening brace, the
   invalidation + `ret = TRUE' after the find at 712, the
   `ret = TRUE' in the signal branch, `return ret;') -- numbering
   jumps 712->717, 721->728.  Restore before compiling. */
699 SilcBool silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
701 SilcTask task = NULL;
702 SilcBool ret = FALSE;
704 SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
706 SILC_SCHEDULE_LOCK(schedule);
708 /* fd is unique, so there is only one task with this fd in the table */
709 if (silc_likely(silc_hash_table_find(schedule->fd_queue,
710 SILC_32_TO_PTR(fd), NULL,
712 SILC_LOG_DEBUG(("Deleting task %p", task));
717 SILC_SCHEDULE_UNLOCK(schedule);
719 /* If it is signal, remove it */
720 if (silc_unlikely(!task)) {
721 schedule_ops.signal_unregister(schedule, schedule->internal, fd);
728 /* Invalidate task by task callback. */

/* Invalidates every task (fd and timeout) whose callback matches.
   NOTE(review): extraction dropped lines (the `task' and `list'
   declarations, the `task->valid = FALSE; ret = TRUE;' bodies of
   both match branches, closing braces, `return ret;') -- numbering
   jumps 745->750, 756->762.  Restore before compiling. */
730 SilcBool silc_schedule_task_del_by_callback(SilcSchedule schedule,
731 SilcTaskCallback callback)
734 SilcHashTableList htl;
736 SilcBool ret = FALSE;
738 SILC_LOG_DEBUG(("Unregister task by callback"));
740 SILC_SCHEDULE_LOCK(schedule);
742 /* Delete from fd queue */
743 silc_hash_table_list(schedule->fd_queue, &htl);
744 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
745 if (task->callback == callback) {
750 silc_hash_table_list_reset(&htl);
752 /* Delete from timeout queue */
753 list = schedule->timeout_queue;
754 silc_list_start(list);
755 while ((task = (SilcTask)silc_list_get(list))) {
756 if (task->callback == callback) {
762 SILC_SCHEDULE_UNLOCK(schedule);
767 /* Invalidate task by context. */

/* Invalidates every task (fd and timeout) whose context matches.
   Mirrors silc_schedule_task_del_by_callback.
   NOTE(review): extraction dropped lines (the `context' parameter,
   `task'/`list' declarations, the match-branch bodies, closing
   braces, `return ret;') -- numbering jumps 769->773, 784->789,
   795->801.  Restore before compiling. */
769 SilcBool silc_schedule_task_del_by_context(SilcSchedule schedule,
773 SilcHashTableList htl;
775 SilcBool ret = FALSE;
777 SILC_LOG_DEBUG(("Unregister task by context"));
779 SILC_SCHEDULE_LOCK(schedule);
781 /* Delete from fd queue */
782 silc_hash_table_list(schedule->fd_queue, &htl);
783 while (silc_hash_table_get(&htl, NULL, (void *)&task)) {
784 if (task->context == context) {
789 silc_hash_table_list_reset(&htl);
791 /* Delete from timeout queue */
792 list = schedule->timeout_queue;
793 silc_list_start(list);
794 while ((task = (SilcTask)silc_list_get(list))) {
795 if (task->context == context) {
801 SILC_SCHEDULE_UNLOCK(schedule);
806 /* Invalidate task by all */

/* Invalidates timeout tasks matching both callback and context; for
   fd tasks (fd != 0, presumably -- the guard line was dropped) it
   delegates to silc_schedule_task_del_by_fd since fds are unique.
   NOTE(review): extraction dropped lines (the `context' parameter,
   `task'/`list' declarations, the fd guard before 820, the
   match-branch body, `return ret;') -- numbering jumps 809->814,
   818->820, 828->834.  Restore before compiling. */
808 SilcBool silc_schedule_task_del_by_all(SilcSchedule schedule, int fd,
809 SilcTaskCallback callback,
814 SilcBool ret = FALSE;
816 SILC_LOG_DEBUG(("Unregister task by fd, callback and context"));
818 /* For fd task, callback and context is irrelevant as fd is unique */
820 return silc_schedule_task_del_by_fd(schedule, fd);
822 SILC_SCHEDULE_LOCK(schedule);
824 /* Delete from timeout queue */
825 list = schedule->timeout_queue;
826 silc_list_start(list);
827 while ((task = (SilcTask)silc_list_get(list))) {
828 if (task->callback == callback && task->context == context) {
834 SILC_SCHEDULE_UNLOCK(schedule);
839 /* Sets a file descriptor to be listened by scheduler. One can call this
840 directly if wanted. This can be called multiple times for one file
841 descriptor to set different iomasks. */

/* Reprograms the event mask of an existing fd task via the platform
   backend; with `send_events' set, the mask is delivered immediately
   through silc_schedule_dispatch_fd as synthetic revents.
   NOTE(review): extraction dropped lines (opening brace, `task'
   declaration, `return FALSE' after the validity check, the
   `return FALSE' / brace closures in the failure branch, final
   `return TRUE;') -- numbering jumps 848->851, 856->860, 862->866.
   Restore before compiling. */
843 SilcBool silc_schedule_set_listen_fd(SilcSchedule schedule, SilcUInt32 fd,
844 SilcTaskEvent mask, SilcBool send_events)
848 if (silc_unlikely(!schedule->valid))
851 SILC_SCHEDULE_LOCK(schedule);
853 if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
854 NULL, (void *)&task)) {
855 if (!schedule_ops.schedule_fd(schedule, schedule->internal, task, mask)) {
856 SILC_SCHEDULE_UNLOCK(schedule);
860 if (silc_unlikely(send_events) && mask) {
861 task->revents = mask;
862 silc_schedule_dispatch_fd(schedule);
866 SILC_SCHEDULE_UNLOCK(schedule);
871 /* Returns the file descriptor's current requested event mask. */

/* Returns 0 when the scheduler is invalid or the fd is unknown.
   NOTE(review): extraction dropped lines (the `fd' parameter and
   opening brace, the `task' declaration, `return 0' after the
   validity check, final `return event;') -- numbering jumps 873->877,
   879->882, 886->891.  Restore before compiling. */
873 SilcTaskEvent silc_schedule_get_fd_events(SilcSchedule schedule,
877 SilcTaskEvent event = 0;
879 if (silc_unlikely(!schedule->valid))
882 SILC_SCHEDULE_LOCK(schedule);
883 if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
884 NULL, (void *)&task))
885 event = task->events;
886 SILC_SCHEDULE_UNLOCK(schedule);
891 /* Removes a file descriptor from listen list. */

/* Convenience wrapper: a zero event mask (and no synthetic events)
   stops listening on the fd.  NOTE(review): braces were dropped by
   the extraction (numbering 893->895). */
893 void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
895 silc_schedule_set_listen_fd(schedule, fd, 0, FALSE);