/*

  silcunixschedule.c

  Author: Pekka Riikonen <priikone@silcnet.org>

  Copyright (C) 1998 - 2007 Pekka Riikonen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; version 2 of the License.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

*/

#include "silc.h"
#if defined(HAVE_EPOLL_WAIT)
#include <sys/epoll.h>
#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
#include <poll.h>
#include <sys/resource.h>
#endif
/* Platform specific scheduler operations, defined at the end of this file. */
const SilcScheduleOps schedule_ops;
/* Internal context. */
typedef struct {
#if defined(HAVE_EPOLL_WAIT)
  struct epoll_event *fds;
  SilcUInt32 fds_count;
  int epfd;
#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
  struct rlimit nofile;
  struct pollfd *fds;
  SilcUInt32 fds_count;
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
#ifdef SILC_THREADS
  int wakeup_pipe[2];
  SilcTask wakeup_task;
#endif
  sigset_t signals;
  sigset_t signals_blocked;
  void *app_context;
} *SilcUnixScheduler;
/* Context for a registered signal. */
typedef struct {
  SilcUInt32 sig;
  SilcTaskCallback callback;
  void *context;
  SilcBool call;
  SilcSchedule schedule;
} SilcUnixSignal;

#define SIGNAL_COUNT 32
SilcUnixSignal signal_call[SIGNAL_COUNT];
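/* Note that the signal table is process-global and thus shared by all
   schedulers in the process.  The async signal handler below only flips
   the `call' flag here; the registered callbacks run later, in normal
   scheduler context, from silc_schedule_internal_signals_call. */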
#if defined(HAVE_EPOLL_WAIT)

/* Linux's fast epoll system (level triggered) */
int silc_epoll(SilcSchedule schedule, void *context)
{
  SilcUnixScheduler internal = context;
  SilcTaskFd task;
  struct epoll_event *fds = internal->fds;
  SilcUInt32 fds_count = internal->fds_count;
  int ret, i, timeout = -1;

  /* Allocate larger fd table if needed */
  i = silc_hash_table_count(schedule->fd_queue);
  if (i > fds_count) {
    fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
                       (fds_count + (i / 2)));
    if (silc_likely(fds)) {
      internal->fds = fds;
      internal->fds_count = fds_count = fds_count + (i / 2);
    } else {
      /* Reallocation failed; keep using the old, still valid table */
      fds = internal->fds;
    }
  }

  /* Convert the timeout to milliseconds for epoll_wait() */
  if (schedule->has_timeout)
    timeout = ((schedule->timeout.tv_sec * 1000) +
               (schedule->timeout.tv_usec / 1000));

  SILC_SCHEDULE_UNLOCK(schedule);
  ret = epoll_wait(internal->epfd, fds, fds_count, timeout);
  SILC_SCHEDULE_LOCK(schedule);
  if (ret <= 0)
    return ret;

  /* Collect the tasks that have events into the dispatch list */
  silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);

  for (i = 0; i < ret; i++) {
    task = fds[i].data.ptr;
    task->revents = 0;
    if (!task->header.valid || !task->events) {
      /* Task was invalidated while we waited; remove it from epoll */
      epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &fds[i]);
      continue;
    }
    if (fds[i].events & (EPOLLIN | EPOLLPRI | EPOLLHUP | EPOLLERR))
      task->revents |= SILC_TASK_READ;
    if (fds[i].events & EPOLLOUT)
      task->revents |= SILC_TASK_WRITE;
    silc_list_add(schedule->fd_dispatch, task);
  }

  return ret;
}
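/* Note: epoll_wait() returning 0 means the timeout expired and a negative
   value means error (errno set); both are returned to the caller as is.
   EPOLLHUP and EPOLLERR are folded into SILC_TASK_READ above so that the
   task's read handler runs and observes the EOF or error condition. */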
#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)

/* Calls normal poll() system call. */

int silc_poll(SilcSchedule schedule, void *context)
{
  SilcUnixScheduler internal = context;
  SilcHashTableList htl;
  SilcTaskFd task;
  struct pollfd *fds = internal->fds;
  SilcUInt32 fds_count = internal->fds_count;
  int fd, ret, i = 0, timeout = -1;
  void *fdp;

  /* Build the pollfd array from the fd task queue */
  silc_hash_table_list(schedule->fd_queue, &htl);
  while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
    if (!task->events)
      continue;
    fd = SILC_PTR_TO_32(fdp);

    /* Allocate larger fd table if needed, raising the fd resource limit
       to match */
    if (i >= fds_count) {
      fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
                         (fds_count + (fds_count / 2)));
      if (silc_unlikely(!fds)) {
        /* Reallocation failed; poll with the old, still valid table */
        fds = internal->fds;
        break;
      }
      internal->fds = fds;
      internal->fds_count = fds_count = fds_count + (fds_count / 2);
      internal->nofile.rlim_cur = fds_count;
      if (fds_count > internal->nofile.rlim_max)
        internal->nofile.rlim_max = fds_count;
      if (setrlimit(RLIMIT_NOFILE, &internal->nofile) < 0)
        break;
    }

    fds[i].fd = fd;
    fds[i].events = 0;
    task->revents = fds[i].revents = 0;

    if (task->events & SILC_TASK_READ)
      fds[i].events |= (POLLIN | POLLPRI);
    if (task->events & SILC_TASK_WRITE)
      fds[i].events |= POLLOUT;
    i++;
  }
  silc_hash_table_list_reset(&htl);
  silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);

  /* Convert the timeout to milliseconds for poll() */
  if (schedule->has_timeout)
    timeout = ((schedule->timeout.tv_sec * 1000) +
               (schedule->timeout.tv_usec / 1000));

  fds_count = i;
  SILC_SCHEDULE_UNLOCK(schedule);
  ret = poll(fds, fds_count, timeout);
  SILC_SCHEDULE_LOCK(schedule);
  if (ret <= 0)
    return ret;

  /* Collect the tasks that have events into the dispatch list */
  for (i = 0; i < fds_count; i++) {
    if (!fds[i].revents)
      continue;
    if (!silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fds[i].fd),
                              NULL, (void *)&task))
      continue;
    if (!task->header.valid || !task->events)
      continue;

    fd = fds[i].revents;
    if (fd & (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLNVAL))
      task->revents |= SILC_TASK_READ;
    if (fd & POLLOUT)
      task->revents |= SILC_TASK_WRITE;
    silc_list_add(schedule->fd_dispatch, task);
  }

  return ret;
}
#else

/* Calls normal select() system call. */

int silc_select(SilcSchedule schedule, void *context)
{
  SilcHashTableList htl;
  SilcTaskFd task;
  fd_set in, out;
  int fd, max_fd = 0, ret;
  void *fdp;

  FD_ZERO(&in);
  FD_ZERO(&out);

  /* Build the fd sets from the fd task queue */
  silc_hash_table_list(schedule->fd_queue, &htl);
  while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
    if (!task->events)
      continue;
    fd = SILC_PTR_TO_32(fdp);
#ifdef FD_SETSIZE
    if (fd >= FD_SETSIZE)
      break;
#endif /* FD_SETSIZE */
    if (fd > max_fd)
      max_fd = fd;
    if (task->events & SILC_TASK_READ)
      FD_SET(fd, &in);
    if (task->events & SILC_TASK_WRITE)
      FD_SET(fd, &out);
    task->revents = 0;
  }
  silc_hash_table_list_reset(&htl);
  silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);

  SILC_SCHEDULE_UNLOCK(schedule);
  ret = select(max_fd + 1, &in, &out, NULL, (schedule->has_timeout ?
                                             &schedule->timeout : NULL));
  SILC_SCHEDULE_LOCK(schedule);
  if (ret <= 0)
    return ret;

  /* Collect the tasks that have events into the dispatch list */
  silc_hash_table_list(schedule->fd_queue, &htl);
  while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
    if (!task->header.valid || !task->events)
      continue;
    fd = SILC_PTR_TO_32(fdp);
#ifdef FD_SETSIZE
    if (fd >= FD_SETSIZE)
      break;
#endif /* FD_SETSIZE */
    if (FD_ISSET(fd, &in))
      task->revents |= SILC_TASK_READ;
    if (FD_ISSET(fd, &out))
      task->revents |= SILC_TASK_WRITE;
    silc_list_add(schedule->fd_dispatch, task);
  }
  silc_hash_table_list_reset(&htl);

  return ret;
}

#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
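/* Exactly one of silc_epoll, silc_poll and silc_select is compiled in,
   selected by the preprocessor tests above.  The schedule_ops table at the
   end of this file plugs the chosen implementation into the generic
   scheduler. */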
/* Schedule `task' with events `event_mask'.  A zero `event_mask'
   unschedules the task. */

SilcBool silc_schedule_internal_schedule_fd(SilcSchedule schedule,
                                            void *context,
                                            SilcTaskFd task,
                                            SilcTaskEvent event_mask)
{
#if defined(HAVE_EPOLL_WAIT)
  SilcUnixScheduler internal = (SilcUnixScheduler)context;
  struct epoll_event event;

  if (!internal)
    return TRUE;

  SILC_LOG_DEBUG(("Scheduling fd %lu, mask %x", task->fd, event_mask));

  memset(&event, 0, sizeof(event));
  if (event_mask & SILC_TASK_READ)
    event.events |= (EPOLLIN | EPOLLPRI);
  if (event_mask & SILC_TASK_WRITE)
    event.events |= EPOLLOUT;

  /* Zero mask unschedules the task */
  if (silc_unlikely(!event.events)) {
    if (epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &event)) {
      SILC_LOG_DEBUG(("epoll_ctl (DEL): %s", strerror(errno)));
      return FALSE;
    }
    task->scheduled = FALSE;
    return TRUE;
  }

  /* Schedule the task for the first time */
  if (silc_unlikely(!task->scheduled)) {
    event.data.ptr = task;
    if (epoll_ctl(internal->epfd, EPOLL_CTL_ADD, task->fd, &event)) {
      SILC_LOG_DEBUG(("epoll_ctl (ADD): %s", strerror(errno)));
      return FALSE;
    }
    task->scheduled = TRUE;
    return TRUE;
  }

  /* Reschedule for a specific event mask */
  event.data.ptr = task;
  if (epoll_ctl(internal->epfd, EPOLL_CTL_MOD, task->fd, &event)) {
    SILC_LOG_DEBUG(("epoll_ctl (MOD): %s", strerror(errno)));
    return FALSE;
  }
#endif /* HAVE_EPOLL_WAIT */

  return TRUE;
}
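/* With the poll() and select() backends the function above compiles to a
   no-op that returns TRUE: those backends rebuild their fd tables from
   schedule->fd_queue on every scheduler round, so there is no persistent
   per-fd kernel state to update.  Only epoll keeps such state. */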
#ifdef SILC_THREADS

SILC_TASK_CALLBACK(silc_schedule_wakeup_cb)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;
  unsigned char c;

  SILC_LOG_DEBUG(("Woke up"));

  /* Drain the byte written by silc_schedule_internal_wakeup */
  (void)read(internal->wakeup_pipe[0], &c, 1);
}

SILC_TASK_CALLBACK(silc_schedule_wakeup_init)
{
  SilcUnixScheduler internal = schedule->internal;

  internal->wakeup_task =
    silc_schedule_task_add(schedule, internal->wakeup_pipe[0],
                           silc_schedule_wakeup_cb, internal,
                           0, 0, SILC_TASK_FD);
  if (!internal->wakeup_task) {
    SILC_LOG_WARNING(("Could not add a wakeup task, threads won't work"));
    close(internal->wakeup_pipe[0]);
    return;
  }
  silc_schedule_internal_schedule_fd(schedule, internal,
                                     (SilcTaskFd)internal->wakeup_task,
                                     SILC_TASK_READ);
}

#endif /* SILC_THREADS */
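/* The two callbacks above implement the classic self-pipe wakeup: when
   another thread adds or removes a task, silc_schedule_internal_wakeup
   writes one byte to wakeup_pipe[1], the fd task on wakeup_pipe[0] fires
   and drops the scheduler out of its epoll/poll/select wait, and
   silc_schedule_wakeup_cb drains the byte. */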
/* Initializes the platform specific scheduler.  This for example initializes
   the wakeup mechanism of the scheduler.  In a multi-threaded environment
   the scheduler needs to be woken up when tasks are added or removed from
   the task queues.  Returns context to the platform specific scheduler. */

void *silc_schedule_internal_init(SilcSchedule schedule,
                                  void *app_context)
{
  SilcUnixScheduler internal;
  int i;

  internal = silc_scalloc(schedule->stack, 1, sizeof(*internal));
  if (!internal)
    return NULL;

#if defined(HAVE_EPOLL_WAIT)
  internal->epfd = epoll_create(4);
  if (internal->epfd < 0) {
    SILC_LOG_ERROR(("epoll_create() failed: %s", strerror(errno)));
    return NULL;
  }
  internal->fds = silc_calloc(4, sizeof(*internal->fds));
  if (!internal->fds) {
    close(internal->epfd);
    return NULL;
  }
  internal->fds_count = 4;
#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
  getrlimit(RLIMIT_NOFILE, &internal->nofile);

  if (schedule->max_tasks > 0) {
    /* Try to raise the fd resource limit to fit the requested task count */
    internal->nofile.rlim_cur = schedule->max_tasks;
    if (schedule->max_tasks > internal->nofile.rlim_max)
      internal->nofile.rlim_max = schedule->max_tasks;
    setrlimit(RLIMIT_NOFILE, &internal->nofile);
    getrlimit(RLIMIT_NOFILE, &internal->nofile);
    schedule->max_tasks = internal->nofile.rlim_max;
  }

  internal->fds = silc_calloc(internal->nofile.rlim_cur,
                              sizeof(*internal->fds));
  if (!internal->fds)
    return NULL;
  internal->fds_count = internal->nofile.rlim_cur;
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */

  sigemptyset(&internal->signals);

#ifdef SILC_THREADS
  if (pipe(internal->wakeup_pipe)) {
    SILC_LOG_ERROR(("pipe() failed: %s", strerror(errno)));
    return NULL;
  }

  silc_schedule_task_add_timeout(schedule, silc_schedule_wakeup_init,
                                 internal, 0, 0);
#endif /* SILC_THREADS */

  internal->app_context = app_context;

  for (i = 0; i < SIGNAL_COUNT; i++) {
    signal_call[i].sig = 0;
    signal_call[i].call = FALSE;
    signal_call[i].schedule = schedule;
  }

  return (void *)internal;
}
void silc_schedule_internal_signals_block(SilcSchedule schedule,
                                          void *context);
void silc_schedule_internal_signals_unblock(SilcSchedule schedule,
                                            void *context);
/* Uninitializes the platform specific scheduler context. */

void silc_schedule_internal_uninit(SilcSchedule schedule, void *context)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  if (!internal)
    return;

#ifdef SILC_THREADS
  close(internal->wakeup_pipe[0]);
  close(internal->wakeup_pipe[1]);
#endif

#if defined(HAVE_EPOLL_WAIT)
  close(internal->epfd);
  silc_free(internal->fds);
#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
  silc_free(internal->fds);
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
}
/* Wakes up the scheduler */

void silc_schedule_internal_wakeup(SilcSchedule schedule, void *context)
{
#ifdef SILC_THREADS
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  if (!internal || !internal->wakeup_task)
    return;

  SILC_LOG_DEBUG(("Wakeup"));

  (void)write(internal->wakeup_pipe[1], "!", 1);
#endif
}
/* Signal handler.  Only marks the signal as pending; the registered
   callback is dispatched later from silc_schedule_internal_signals_call,
   outside signal handler context. */

static void silc_schedule_internal_sighandler(int signal)
{
  int i;

  SILC_LOG_DEBUG(("Start"));

  for (i = 0; i < SIGNAL_COUNT; i++) {
    if (signal_call[i].sig == signal) {
      signal_call[i].call = TRUE;
      signal_call[i].schedule->signal_tasks = TRUE;
      SILC_LOG_DEBUG(("Scheduling signal %d to be called",
                      signal_call[i].sig));
      break;
    }
  }
}
void silc_schedule_internal_signal_register(SilcSchedule schedule,
                                            void *context,
                                            SilcUInt32 sig,
                                            SilcTaskCallback callback,
                                            void *callback_context)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;
  int i;

  if (!internal)
    return;

  SILC_LOG_DEBUG(("Registering signal %d", sig));

  silc_schedule_internal_signals_block(schedule, context);

  /* Take a free slot from the signal table */
  for (i = 0; i < SIGNAL_COUNT; i++) {
    if (!signal_call[i].sig) {
      signal_call[i].sig = sig;
      signal_call[i].callback = callback;
      signal_call[i].context = callback_context;
      signal_call[i].schedule = schedule;
      signal_call[i].call = FALSE;
      signal(sig, silc_schedule_internal_sighandler);
      break;
    }
  }

  silc_schedule_internal_signals_unblock(schedule, context);
  sigaddset(&internal->signals, sig);
}
void silc_schedule_internal_signal_unregister(SilcSchedule schedule,
                                              void *context,
                                              SilcUInt32 sig)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;
  int i;

  if (!internal)
    return;

  SILC_LOG_DEBUG(("Unregistering signal %d", sig));

  silc_schedule_internal_signals_block(schedule, context);

  for (i = 0; i < SIGNAL_COUNT; i++) {
    if (signal_call[i].sig == sig) {
      signal_call[i].sig = 0;
      signal_call[i].callback = NULL;
      signal_call[i].context = NULL;
      signal_call[i].schedule = NULL;
      signal_call[i].call = FALSE;
      signal(sig, SIG_DFL);
    }
  }

  silc_schedule_internal_signals_unblock(schedule, context);
  sigdelset(&internal->signals, sig);
}
/* Call all pending signal callbacks */

void silc_schedule_internal_signals_call(SilcSchedule schedule, void *context)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;
  int i;

  SILC_LOG_DEBUG(("Start"));

  if (!internal)
    return;

  silc_schedule_internal_signals_block(schedule, context);

  for (i = 0; i < SIGNAL_COUNT; i++) {
    if (signal_call[i].call &&
        signal_call[i].callback) {
      SILC_LOG_DEBUG(("Calling signal %d callback",
                      signal_call[i].sig));
      /* Unblock signals for the duration of the callback */
      silc_schedule_internal_signals_unblock(schedule, context);
      signal_call[i].callback(schedule, internal->app_context,
                              SILC_TASK_INTERRUPT,
                              signal_call[i].sig,
                              signal_call[i].context);
      signal_call[i].call = FALSE;
      silc_schedule_internal_signals_block(schedule, context);
    }
  }

  silc_schedule_internal_signals_unblock(schedule, context);
}
/* Block registered signals in scheduler. */

void silc_schedule_internal_signals_block(SilcSchedule schedule, void *context)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  if (!internal)
    return;

  sigprocmask(SIG_BLOCK, &internal->signals, &internal->signals_blocked);
}
/* Unblock registered signals in scheduler. */

void silc_schedule_internal_signals_unblock(SilcSchedule schedule,
                                            void *context)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  if (!internal)
    return;

  sigprocmask(SIG_SETMASK, &internal->signals_blocked, NULL);
}
/* Platform specific scheduler operations */

const SilcScheduleOps schedule_ops =
{
  silc_schedule_internal_init,
  silc_schedule_internal_uninit,
#if defined(HAVE_EPOLL_WAIT)
  silc_epoll,
#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
  silc_poll,
#else
  silc_select,
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
  silc_schedule_internal_schedule_fd,
  silc_schedule_internal_wakeup,
  silc_schedule_internal_signal_register,
  silc_schedule_internal_signal_unregister,
  silc_schedule_internal_signals_call,
  silc_schedule_internal_signals_block,
  silc_schedule_internal_signals_unblock,
};
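/* A rough sketch of how the generic scheduler consumes this table
   (illustrative only; the real member names come from the private
   SilcScheduleOps definition in the common scheduler code):

     internal = init(schedule, app_context);
     while (running) {
       ret = select(schedule, internal);  // silc_epoll/silc_poll/silc_select
       ... dispatch schedule->fd_dispatch and expired timeouts ...
     }
     uninit(schedule, internal);
*/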