5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 1998 - 2008 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
20 #include "silcruntime.h"
22 #if defined(HAVE_EPOLL_WAIT)
23 #include <sys/epoll.h>
24 #elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
/* Forward declaration of the platform operations table defined at the
   end of this file. */
28 const SilcScheduleOps schedule_ops;
30 /* Internal context. */
/* Platform-specific scheduler state: the backend fd table (epoll event
   array or pollfd array), saved signal masks and, with threads, the
   wakeup pipe.  NOTE(review): several member lines are missing from this
   extracted view (numbering gaps). */
32 #if defined(HAVE_EPOLL_WAIT)
/* epoll backend: event array passed to epoll_wait() */
33 struct epoll_event *fds;
36 #elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
40 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
/* Mask saved by silc_schedule_internal_signals_block() and restored by
   silc_schedule_internal_signals_unblock(). */
45 sigset_t signals_blocked;
/* Per-signal registration record (SilcUnixSignal): callback to invoke
   and the schedule that owns the registration. */
50 SilcTaskCallback callback;
53 SilcSchedule schedule;
/* Maximum number of distinct signals registrable at once. */
56 #define SIGNAL_COUNT 32
58 #if defined(HAVE_EPOLL_WAIT)
60 /* Linux's fast epoll system (level triggered) */
/* Backend wait operation for the epoll case: waits for fd events with
   epoll_wait() (honoring the scheduler timeout), then collects the ready
   tasks into schedule->fd_dispatch.  Returns epoll_wait()'s result. */
62 int silc_epoll(SilcSchedule schedule, void *context)
64 SilcUnixScheduler internal = context;
66 struct epoll_event *fds = internal->fds;
67 SilcUInt32 fds_count = internal->fds_count;
68 int ret, i, timeout = -1;
70 /* Allocate larger fd table if needed */
71 i = silc_hash_table_count(schedule->fd_queue);
73 fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
74 (fds_count + (i / 2)));
75 if (silc_likely(fds)) {
/* NOTE(review): an `internal->fds = fds;` assignment appears to have
   been lost in extraction here (numbering gap 75->77); confirm against
   the upstream source before assuming a realloc bug. */
77 internal->fds_count = fds_count = fds_count + (i / 2);
/* Convert the scheduler timeout (timeval) to epoll milliseconds;
   -1 blocks indefinitely. */
81 if (schedule->has_timeout)
82 timeout = ((schedule->timeout.tv_sec * 1000) +
83 (schedule->timeout.tv_usec / 1000));
/* Drop the lock while blocked in the kernel so other threads can add
   or remove tasks (they interrupt us via the wakeup pipe). */
85 SILC_SCHEDULE_UNLOCK(schedule);
86 ret = epoll_wait(internal->epfd, fds, fds_count, timeout);
87 SILC_SCHEDULE_LOCK(schedule);
91 silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
93 for (i = 0; i < ret; i++) {
/* Task pointer was stashed in the event's user data when scheduled. */
94 task = fds[i].data.ptr;
/* Invalidated/eventless tasks are removed from the epoll set instead
   of being dispatched. */
96 if (!task->header.valid || !task->events) {
97 epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &fds[i]);
/* Map epoll events to SILC task events; hangups and errors are
   reported as readable so the callback can detect them. */
100 if (fds[i].events & (EPOLLIN | EPOLLPRI | EPOLLHUP | EPOLLERR))
101 task->revents |= SILC_TASK_READ;
102 if (fds[i].events & EPOLLOUT)
103 task->revents |= SILC_TASK_WRITE;
104 silc_list_add(schedule->fd_dispatch, task);
110 #elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
112 /* Calls normal poll() system call. */
/* Backend wait operation for the poll case: rebuilds the pollfd table
   from schedule->fd_queue on every call, polls, then maps revents back
   to the owning tasks and queues them on schedule->fd_dispatch. */
114 int silc_poll(SilcSchedule schedule, void *context)
116 SilcUnixScheduler internal = context;
117 SilcHashTableList htl;
119 struct pollfd *fds = internal->fds;
120 SilcUInt32 fds_count = internal->fds_count;
121 int fd, ret, i = 0, timeout = -1;
/* Walk all scheduled fd tasks and fill one pollfd slot each. */
124 silc_hash_table_list(schedule->fd_queue, &htl);
125 while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
128 fd = SILC_PTR_TO_32(fdp);
130 /* Allocate larger fd table if needed */
131 if (i >= fds_count) {
132 struct rlimit nofile;
134 fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
135 (fds_count + (fds_count / 2)));
136 if (silc_unlikely(!fds))
/* Grow the table by 50% and raise our RLIMIT_NOFILE to match. */
139 internal->fds_count = fds_count = fds_count + (fds_count / 2);
140 internal->nofile.rlim_cur = fds_count;
141 if (fds_count > internal->nofile.rlim_max)
142 internal->nofile.rlim_max = fds_count;
/* NOTE(review): this passes the local, never-initialized `nofile`
   rather than the just-updated internal->nofile — reading an
   uninitialized struct is undefined behavior and the raised limit is
   never actually applied.  &internal->nofile was presumably intended;
   confirm and fix upstream. */
143 if (setrlimit(RLIMIT_NOFILE, &nofile) < 0)
149 task->revents = fds[i].revents = 0;
/* Translate SILC task interest bits to poll(2) event bits. */
151 if (task->events & SILC_TASK_READ)
152 fds[i].events |= (POLLIN | POLLPRI);
153 if (task->events & SILC_TASK_WRITE)
154 fds[i].events |= POLLOUT;
157 silc_hash_table_list_reset(&htl);
158 silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
/* Scheduler timeout in milliseconds; -1 blocks indefinitely. */
160 if (schedule->has_timeout)
161 timeout = ((schedule->timeout.tv_sec * 1000) +
162 (schedule->timeout.tv_usec / 1000));
/* Unlock while blocked in poll() so other threads may modify queues. */
165 SILC_SCHEDULE_UNLOCK(schedule);
166 ret = poll(fds, fds_count, timeout);
167 SILC_SCHEDULE_LOCK(schedule);
/* Dispatch pass: look each ready fd back up in the task queue. */
171 for (i = 0; i < fds_count; i++) {
174 if (!silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fds[i].fd),
175 NULL, (void *)&task))
177 if (!task->header.valid || !task->events)
/* `fd` holds the returned revents here (assignment lost in this
   extracted view); error conditions are reported as readable. */
181 if (fd & (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLNVAL))
182 task->revents |= SILC_TASK_READ;
184 task->revents |= SILC_TASK_WRITE;
185 silc_list_add(schedule->fd_dispatch, task);
193 /* Calls normal select() system call. */
/* Fallback backend: builds fd_sets from schedule->fd_queue, select()s,
   then marks the ready tasks and queues them on schedule->fd_dispatch.
   Descriptors at or above FD_SETSIZE cannot be monitored and are
   skipped. */
195 int silc_select(SilcSchedule schedule, void *context)
197 SilcHashTableList htl;
200 int fd, max_fd = 0, ret;
/* First pass: fill the read/write fd_sets from the scheduled tasks. */
206 silc_hash_table_list(schedule->fd_queue, &htl);
207 while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
210 fd = SILC_PTR_TO_32(fdp);
/* select() cannot handle fds >= FD_SETSIZE; skip them. */
213 if (fd >= FD_SETSIZE)
215 #endif /* FD_SETSIZE */
220 if (task->events & SILC_TASK_READ)
222 if (task->events & SILC_TASK_WRITE)
227 silc_hash_table_list_reset(&htl);
228 silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
/* Unlock for the blocking wait; select() takes the timeout directly
   as a struct timeval (NULL blocks indefinitely). */
230 SILC_SCHEDULE_UNLOCK(schedule);
231 ret = select(max_fd + 1, &in, &out, NULL, (schedule->has_timeout ?
232 &schedule->timeout : NULL));
233 SILC_SCHEDULE_LOCK(schedule);
/* Second pass: record which tasks became readable/writable. */
237 silc_hash_table_list(schedule->fd_queue, &htl);
238 while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
239 if (!task->header.valid || !task->events)
241 fd = SILC_PTR_TO_32(fdp);
244 if (fd >= FD_SETSIZE)
246 #endif /* FD_SETSIZE */
248 if (FD_ISSET(fd, &in))
249 task->revents |= SILC_TASK_READ;
250 if (FD_ISSET(fd, &out))
251 task->revents |= SILC_TASK_WRITE;
252 silc_list_add(schedule->fd_dispatch, task);
254 silc_hash_table_list_reset(&htl);
259 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
261 /* Schedule `task' with events `event_mask'. Zero `event_mask' unschedules. */
263 SilcBool silc_schedule_internal_schedule_fd(SilcSchedule schedule,
266 SilcTaskEvent event_mask)
/* epoll keeps interest state in the kernel, so changes are pushed
   immediately with epoll_ctl(); the poll/select backends rebuild their
   tables on every wait and need no work here. */
268 #if defined(HAVE_EPOLL_WAIT)
269 SilcUnixScheduler internal = (SilcUnixScheduler)context;
270 struct epoll_event event;
275 SILC_LOG_DEBUG(("Scheduling fd %lu, mask %x", task->fd, event_mask));
/* Translate the SILC event mask to epoll interest bits. */
277 memset(&event, 0, sizeof(event));
278 if (event_mask & SILC_TASK_READ)
279 event.events |= (EPOLLIN | EPOLLPRI);
280 if (event_mask & SILC_TASK_WRITE)
281 event.events |= EPOLLOUT;
283 /* Zero mask unschedules task */
284 if (silc_unlikely(!event.events)) {
285 if (epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &event)) {
286 SILC_LOG_DEBUG(("epoll_ctl (DEL): %s", strerror(errno)));
289 task->scheduled = FALSE;
293 /* Schedule the task */
294 if (silc_unlikely(!task->scheduled)) {
/* First time: ADD the fd with the task pointer as user data so the
   wait loop can recover the task from each event. */
295 event.data.ptr = task;
296 if (epoll_ctl(internal->epfd, EPOLL_CTL_ADD, task->fd, &event)) {
297 SILC_LOG_DEBUG(("epoll_ctl (ADD): %s", strerror(errno)));
300 task->scheduled = TRUE;
304 /* Schedule for specific mask */
/* Already registered: just modify the interest mask. */
305 event.data.ptr = task;
306 if (epoll_ctl(internal->epfd, EPOLL_CTL_MOD, task->fd, &event)) {
307 SILC_LOG_DEBUG(("epoll_ctl (MOD): %s", strerror(errno)));
310 #endif /* HAVE_EPOLL_WAIT */
/* Fd-task callback run when the wakeup pipe becomes readable: drains a
   byte so the pipe does not stay readable.  Exists only so another
   thread can interrupt a blocking wait. */
316 SILC_TASK_CALLBACK(silc_schedule_wakeup_cb)
318 SilcUnixScheduler internal = (SilcUnixScheduler)context;
321 SILC_LOG_DEBUG(("Wokeup"));
/* Result deliberately ignored; draining is best-effort. */
323 (void)read(internal->wakeup_pipe[0], &c, 1);
/* Deferred initialization of the wakeup mechanism (run as a scheduler
   task): registers the read end of the wakeup pipe as an fd task. */
326 SILC_TASK_CALLBACK(silc_schedule_wakeup_init)
328 SilcUnixScheduler internal = schedule->internal;
330 internal->wakeup_task =
331 silc_schedule_task_add(schedule, internal->wakeup_pipe[0],
332 silc_schedule_wakeup_cb, internal,
334 if (!internal->wakeup_task) {
/* Without a wakeup task cross-thread wakeups cannot work; close the
   read end and carry on without them. */
335 SILC_LOG_WARNING(("Could not add a wakeup task, threads won't work"));
336 close(internal->wakeup_pipe[0]);
/* Arm the backend (epoll) for the pipe's read events. */
339 silc_schedule_internal_schedule_fd(schedule, internal,
340 (SilcTaskFd)internal->wakeup_task,
343 #endif /* SILC_THREADS */
345 /* Initializes the platform specific scheduler. This for example initializes
346 the wakeup mechanism of the scheduler. In multi-threaded environment
347 the scheduler needs to be woken up when tasks are added or removed from
348 the task queues. Returns context to the platform specific scheduler. */
350 void *silc_schedule_internal_init(SilcSchedule schedule,
353 SilcUnixScheduler internal;
354 SilcUnixSignal *signal_call;
/* Context is allocated from the schedule's stack allocator. */
357 internal = silc_scalloc(schedule->stack, 1, sizeof(*internal));
/* epoll backend: create the kernel instance and an initial 4-entry
   event array; silc_epoll() grows it on demand. */
361 #if defined(HAVE_EPOLL_WAIT)
362 internal->epfd = epoll_create(4);
363 if (internal->epfd < 0) {
364 SILC_LOG_ERROR(("epoll_create() failed: %s", strerror(errno)));
367 internal->fds = silc_calloc(4, sizeof(*internal->fds));
368 if (!internal->fds) {
/* Don't leak the epoll fd on allocation failure. */
369 close(internal->epfd);
372 internal->fds_count = 4;
373 #elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
/* poll backend: size the pollfd table from RLIMIT_NOFILE, raising the
   limit toward schedule->max_tasks when requested. */
374 getrlimit(RLIMIT_NOFILE, &internal->nofile);
376 if (schedule->max_tasks > 0) {
377 internal->nofile.rlim_cur = schedule->max_tasks;
378 if (schedule->max_tasks > internal->nofile.rlim_max)
379 internal->nofile.rlim_max = schedule->max_tasks;
380 setrlimit(RLIMIT_NOFILE, &internal->nofile);
/* Re-read to learn what the kernel actually granted. */
381 getrlimit(RLIMIT_NOFILE, &internal->nofile);
382 schedule->max_tasks = internal->nofile.rlim_max;
385 internal->fds = silc_calloc(internal->nofile.rlim_cur,
386 sizeof(*internal->fds));
389 internal->fds_count = internal->nofile.rlim_cur;
390 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
392 sigemptyset(&internal->signals);
/* Wakeup pipe lets other threads interrupt a blocking wait. */
395 if (pipe(internal->wakeup_pipe)) {
396 SILC_LOG_ERROR(("pipe() fails: %s", strerror(errno)));
/* The pipe's fd task must be added from inside the scheduler, hence
   the deferred init via a timeout task. */
400 silc_schedule_task_add_timeout(schedule, silc_schedule_wakeup_init,
402 #endif /* SILC_THREADS */
404 internal->app_context = app_context;
/* Signal registrations live in a process-global table shared with the
   async signal handler; create and zero it on first use. */
406 signal_call = silc_global_get_var("srtsignals", TRUE);
408 signal_call = silc_global_set_var("srtsignals",
409 sizeof(*signal_call) * SIGNAL_COUNT,
412 for (i = 0; i < SIGNAL_COUNT; i++) {
413 signal_call[i].sig = 0;
414 signal_call[i].call = FALSE;
415 signal_call[i].schedule = schedule;
419 return (void *)internal;
/* Forward declarations; the definitions are at the end of this file. */
422 void silc_schedule_internal_signals_block(SilcSchedule schedule,
424 void silc_schedule_internal_signals_unblock(SilcSchedule schedule,
427 /* Uninitializes the platform specific scheduler context. */
429 void silc_schedule_internal_uninit(SilcSchedule schedule, void *context)
431 SilcUnixScheduler internal = (SilcUnixScheduler)context;
/* Close both ends of the wakeup pipe (threads build). */
437 close(internal->wakeup_pipe[0]);
438 close(internal->wakeup_pipe[1]);
/* Release the backend fd table and, for epoll, the kernel instance. */
441 #if defined(HAVE_EPOLL_WAIT)
442 close(internal->epfd);
443 silc_free(internal->fds);
444 #elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
445 silc_free(internal->fds);
446 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
/* Drop the process-global signal registration table. */
448 silc_global_del_var("srtsignals", TRUE);
451 /* Wakes up the scheduler */
453 void silc_schedule_internal_wakeup(SilcSchedule schedule, void *context)
456 SilcUnixScheduler internal = (SilcUnixScheduler)context;
/* Nothing to wake until the wakeup task exists. */
458 if (!internal || !internal->wakeup_task)
461 SILC_LOG_DEBUG(("Wakeup"));
/* One byte makes the pipe readable and interrupts the blocking wait;
   the result is deliberately ignored (best-effort). */
463 (void)write(internal->wakeup_pipe[1], "!", 1);
/* Async signal handler installed by signal_register(): only marks the
   matching registration as pending; the user callback runs later from
   the scheduler via silc_schedule_internal_signals_call(). */
469 static void silc_schedule_internal_sighandler(int signal)
472 SilcUnixSignal *signal_call = silc_global_get_var("srtsignals", TRUE);
477 SILC_LOG_DEBUG(("Start"));
479 for (i = 0; i < SIGNAL_COUNT; i++) {
480 if (signal_call[i].sig == signal) {
481 signal_call[i].call = TRUE;
/* Tell the scheduler loop there are pending signal callbacks. */
482 signal_call[i].schedule->signal_tasks = TRUE;
483 SILC_LOG_DEBUG(("Scheduling signal %d to be called",
484 signal_call[i].sig));
/* Registers `callback' for signal `sig': records it in a free slot of
   the global table and installs the async handler with signal(2). */
490 void silc_schedule_internal_signal_register(SilcSchedule schedule,
493 SilcTaskCallback callback,
494 void *callback_context)
496 SilcUnixScheduler internal = (SilcUnixScheduler)context;
497 SilcUnixSignal *signal_call = silc_global_get_var("srtsignals", TRUE);
500 if (!internal || !signal_call)
503 SILC_LOG_DEBUG(("Registering signal %d", sig));
/* Keep registered signals blocked while mutating the shared table. */
505 silc_schedule_internal_signals_block(schedule, context);
507 for (i = 0; i < SIGNAL_COUNT; i++) {
508 if (!signal_call[i].sig) {
509 signal_call[i].sig = sig;
510 signal_call[i].callback = callback;
511 signal_call[i].context = callback_context;
512 signal_call[i].schedule = schedule;
513 signal_call[i].call = FALSE;
514 signal(sig, silc_schedule_internal_sighandler);
519 silc_schedule_internal_signals_unblock(schedule, context);
/* Remember the signal so block/unblock cover it from now on. */
520 sigaddset(&internal->signals, sig);
/* Unregisters signal `sig': clears its slot in the global table and
   restores the default disposition with signal(sig, SIG_DFL). */
523 void silc_schedule_internal_signal_unregister(SilcSchedule schedule,
527 SilcUnixScheduler internal = (SilcUnixScheduler)context;
528 SilcUnixSignal *signal_call = silc_global_get_var("srtsignals", TRUE);
531 if (!internal || !signal_call)
534 SILC_LOG_DEBUG(("Unregistering signal %d", sig));
/* Keep registered signals blocked while mutating the shared table. */
536 silc_schedule_internal_signals_block(schedule, context);
538 for (i = 0; i < SIGNAL_COUNT; i++) {
539 if (signal_call[i].sig == sig) {
540 signal_call[i].sig = 0;
541 signal_call[i].callback = NULL;
542 signal_call[i].context = NULL;
543 signal_call[i].schedule = NULL;
544 signal_call[i].call = FALSE;
545 signal(sig, SIG_DFL);
549 silc_schedule_internal_signals_unblock(schedule, context);
/* Stop covering this signal in future block/unblock calls. */
550 sigdelset(&internal->signals, sig);
553 /* Call all signals */
/* Runs, from the scheduler loop, the callbacks of every signal the async
   handler marked pending; each callback executes with signals unblocked,
   then the pending flag is cleared. */
555 void silc_schedule_internal_signals_call(SilcSchedule schedule, void *context)
557 SilcUnixScheduler internal = (SilcUnixScheduler)context;
558 SilcUnixSignal *signal_call = silc_global_get_var("srtsignals", TRUE);
561 SILC_LOG_DEBUG(("Start"));
563 if (!internal || !signal_call)
/* Block while scanning the table so the handler cannot race us. */
566 silc_schedule_internal_signals_block(schedule, context);
568 for (i = 0; i < SIGNAL_COUNT; i++) {
569 if (signal_call[i].call &&
570 signal_call[i].callback) {
571 SILC_LOG_DEBUG(("Calling signal %d callback",
572 signal_call[i].sig));
/* Unblock around the user callback so it may use signals itself. */
573 silc_schedule_internal_signals_unblock(schedule, context);
574 signal_call[i].callback(schedule, internal->app_context,
577 signal_call[i].context);
578 signal_call[i].call = FALSE;
579 silc_schedule_internal_signals_block(schedule, context);
583 silc_schedule_internal_signals_unblock(schedule, context);
586 /* Block registered signals in scheduler. */
588 void silc_schedule_internal_signals_block(SilcSchedule schedule, void *context)
590 SilcUnixScheduler internal = (SilcUnixScheduler)context;
/* Save the previous mask so unblock can restore it exactly. */
595 sigprocmask(SIG_BLOCK, &internal->signals, &internal->signals_blocked);
598 /* Unblock registered signals in schedule. */
600 void silc_schedule_internal_signals_unblock(SilcSchedule schedule,
603 SilcUnixScheduler internal = (SilcUnixScheduler)context;
/* Restore the mask saved by silc_schedule_internal_signals_block(). */
608 sigprocmask(SIG_SETMASK, &internal->signals_blocked, NULL);
/* Platform operations table consumed by the generic scheduler; the
   wait entry (epoll/poll/select) is chosen at compile time by the same
   preprocessor conditions used above. */
611 const SilcScheduleOps schedule_ops =
613 silc_schedule_internal_init,
614 silc_schedule_internal_uninit,
615 #if defined(HAVE_EPOLL_WAIT)
617 #elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
621 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
622 silc_schedule_internal_schedule_fd,
623 silc_schedule_internal_wakeup,
624 silc_schedule_internal_signal_register,
625 silc_schedule_internal_signal_unregister,
626 silc_schedule_internal_signals_call,
627 silc_schedule_internal_signals_block,
628 silc_schedule_internal_signals_unblock,