5 Author: Pekka Riikonen <priikone@silcnet.org>
7 Copyright (C) 1998 - 2008 Pekka Riikonen
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; version 2 of the License.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
23 #if defined(HAVE_EPOLL_WAIT)
24 #include <sys/epoll.h>
25 #elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
/* Forward declaration of the platform scheduler operations table; the
   actual initializer is at the bottom of this file. */
29 const SilcScheduleOps schedule_ops;
31 /* Internal context. */
33 #if defined(HAVE_EPOLL_WAIT)
/* Event buffer handed to epoll_wait(); grown on demand in silc_epoll(). */
34 struct epoll_event *fds;
37 #elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
41 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
/* Signal mask saved by silc_schedule_internal_signals_block() and
   restored by silc_schedule_internal_signals_unblock(). */
46 sigset_t signals_blocked;
/* Per-signal registration: callback to invoke and the schedule it
   belongs to (fields of the SilcUnixSignal record). */
51 SilcTaskCallback callback;
54 SilcSchedule schedule;
/* Fixed-size table of registered signals, indexed linearly.  NOTE(review):
   this table is file-global shared state touched from the signal handler;
   entries are only flagged there and dispatched later from
   silc_schedule_internal_signals_call(). */
57 #define SIGNAL_COUNT 32
58 SilcUnixSignal signal_call[SIGNAL_COUNT];
60 #if defined(HAVE_EPOLL_WAIT)

62 /* Linux's fast epoll system (level triggered) */

/* Waits for scheduled fd events with epoll_wait() and builds the
   schedule's fd dispatch list from the ready events.  Returns the
   number of ready fds (the epoll_wait() result). */
64 int silc_epoll(SilcSchedule schedule, void *context)
66 SilcUnixScheduler internal = context;
68 struct epoll_event *fds = internal->fds;
69 SilcUInt32 fds_count = internal->fds_count;
70 int ret, i, timeout = -1;
72 /* Allocate larger fd table if needed */
73 i = silc_hash_table_count(schedule->fd_queue);
/* Grow the event buffer by half the queue size; on success the new
   size is recorded in internal->fds_count (the matching update of
   internal->fds is presumably done here too — confirm upstream). */
75 fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
76 (fds_count + (i / 2)));
77 if (silc_likely(fds)) {
79 internal->fds_count = fds_count = fds_count + (i / 2);
/* Convert the scheduler's timeval timeout to epoll's milliseconds;
   -1 (no timeout) blocks indefinitely. */
83 if (schedule->has_timeout)
84 timeout = ((schedule->timeout.tv_sec * 1000) +
85 (schedule->timeout.tv_usec / 1000));
/* Release the scheduler lock across the blocking wait so other
   threads may add/remove tasks meanwhile. */
87 SILC_SCHEDULE_UNLOCK(schedule);
88 ret = epoll_wait(internal->epfd, fds, fds_count, timeout);
89 SILC_SCHEDULE_LOCK(schedule);
93 silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
95 for (i = 0; i < ret; i++) {
/* The task pointer was stored in event.data.ptr at schedule time. */
96 task = fds[i].data.ptr;
/* Task was invalidated or unscheduled while we were waiting: drop
   it from the epoll set instead of dispatching it. */
98 if (!task->header.valid || !task->events) {
99 epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &fds[i]);
/* HUP/ERR are reported as readable so the owner notices EOF/error. */
102 if (fds[i].events & (EPOLLIN | EPOLLPRI | EPOLLHUP | EPOLLERR))
103 task->revents |= SILC_TASK_READ;
104 if (fds[i].events & EPOLLOUT)
105 task->revents |= SILC_TASK_WRITE;
106 silc_list_add(schedule->fd_dispatch, task);
112 #elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)

114 /* Calls normal poll() system call. */

/* Rebuilds the pollfd array from the fd queue, waits in poll() and
   builds the fd dispatch list from the returned revents.  Returns the
   poll() result. */
116 int silc_poll(SilcSchedule schedule, void *context)
118 SilcUnixScheduler internal = context;
119 SilcHashTableList htl;
121 struct pollfd *fds = internal->fds;
122 SilcUInt32 fds_count = internal->fds_count;
123 int fd, ret, i = 0, timeout = -1;
/* Fill the pollfd table from every task in the fd queue. */
126 silc_hash_table_list(schedule->fd_queue, &htl);
127 while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
130 fd = SILC_PTR_TO_32(fdp);
132 /* Allocate larger fd table if needed */
133 if (i >= fds_count) {
134 struct rlimit nofile;
/* Grow by half; the old pointer in internal->fds stays valid if
   realloc fails, so bailing out here does not leak. */
136 fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
137 (fds_count + (fds_count / 2)));
138 if (silc_unlikely(!fds))
141 internal->fds_count = fds_count = fds_count + (fds_count / 2);
/* Raise RLIMIT_NOFILE to match the grown table. */
142 internal->nofile.rlim_cur = fds_count;
143 if (fds_count > internal->nofile.rlim_max)
144 internal->nofile.rlim_max = fds_count;
/* NOTE(review): the updated limits were written into internal->nofile
   above, but the call below passes the local `nofile`, which is never
   initialized — this looks like it should be &internal->nofile (passing
   an indeterminate struct rlimit to setrlimit() is a genuine bug).
   Verify against upstream before changing. */
145 if (setrlimit(RLIMIT_NOFILE, &nofile) < 0)
/* Reset both views of the revents and set the requested events. */
151 task->revents = fds[i].revents = 0;
153 if (task->events & SILC_TASK_READ)
154 fds[i].events |= (POLLIN | POLLPRI);
155 if (task->events & SILC_TASK_WRITE)
156 fds[i].events |= POLLOUT;
159 silc_hash_table_list_reset(&htl);
160 silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
/* Convert the timeval timeout to poll()'s milliseconds; -1 blocks. */
162 if (schedule->has_timeout)
163 timeout = ((schedule->timeout.tv_sec * 1000) +
164 (schedule->timeout.tv_usec / 1000));
/* Drop the scheduler lock across the blocking wait. */
167 SILC_SCHEDULE_UNLOCK(schedule);
168 ret = poll(fds, fds_count, timeout);
169 SILC_SCHEDULE_LOCK(schedule);
/* Dispatch: map each ready pollfd back to its task via the fd queue.
   Tasks removed or invalidated during the wait are skipped. */
173 for (i = 0; i < fds_count; i++) {
176 if (!silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fds[i].fd),
177 NULL, (void *)&task))
179 if (!task->header.valid || !task->events)
/* `fd` holds fds[i].revents here; ERR/HUP/NVAL surface as readable. */
183 if (fd & (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLNVAL))
184 task->revents |= SILC_TASK_READ;
186 task->revents |= SILC_TASK_WRITE;
187 silc_list_add(schedule->fd_dispatch, task);
195 /* Calls normal select() system call. */

/* Fallback multiplexer: builds read/write fd_sets from the fd queue,
   waits in select() and builds the fd dispatch list from the ready
   sets.  Returns the select() result. */
197 int silc_select(SilcSchedule schedule, void *context)
199 SilcHashTableList htl;
202 int fd, max_fd = 0, ret;
/* Build the fd_sets and track the highest fd for select(). */
208 silc_hash_table_list(schedule->fd_queue, &htl);
209 while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
212 fd = SILC_PTR_TO_32(fdp);
/* select() cannot handle fds >= FD_SETSIZE; such fds are skipped. */
215 if (fd >= FD_SETSIZE)
217 #endif /* FD_SETSIZE */
222 if (task->events & SILC_TASK_READ)
224 if (task->events & SILC_TASK_WRITE)
229 silc_hash_table_list_reset(&htl);
230 silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
/* Drop the scheduler lock across the blocking wait; select() takes
   the timeval directly (NULL = block indefinitely). */
232 SILC_SCHEDULE_UNLOCK(schedule);
233 ret = select(max_fd + 1, &in, &out, NULL, (schedule->has_timeout ?
234 &schedule->timeout : NULL));
235 SILC_SCHEDULE_LOCK(schedule);
/* Dispatch: walk the fd queue again and collect tasks whose fd is
   set in the ready read/write sets. */
239 silc_hash_table_list(schedule->fd_queue, &htl);
240 while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
241 if (!task->header.valid || !task->events)
243 fd = SILC_PTR_TO_32(fdp);
246 if (fd >= FD_SETSIZE)
248 #endif /* FD_SETSIZE */
250 if (FD_ISSET(fd, &in))
251 task->revents |= SILC_TASK_READ;
252 if (FD_ISSET(fd, &out))
253 task->revents |= SILC_TASK_WRITE;
254 silc_list_add(schedule->fd_dispatch, task);
256 silc_hash_table_list_reset(&htl);
261 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
263 /* Schedule `task' with events `event_mask'. Zero `event_mask' unschedules. */

265 SilcBool silc_schedule_internal_schedule_fd(SilcSchedule schedule,
268 SilcTaskEvent event_mask)
270 #if defined(HAVE_EPOLL_WAIT)
271 SilcUnixScheduler internal = (SilcUnixScheduler)context;
272 struct epoll_event event;
277 SILC_LOG_DEBUG(("Scheduling fd %u, mask %x", task->fd, event_mask));
/* Translate the generic task mask to epoll event bits. */
279 memset(&event, 0, sizeof(event));
280 if (event_mask & SILC_TASK_READ)
281 event.events |= (EPOLLIN | EPOLLPRI);
282 if (event_mask & SILC_TASK_WRITE)
283 event.events |= EPOLLOUT;
285 /* Zero mask unschedules task */
286 if (silc_unlikely(!event.events)) {
/* Note: pre-2.6.9 kernels require a non-NULL event pointer for
   EPOLL_CTL_DEL, hence &event is passed even though it is unused. */
287 if (epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &event)) {
288 SILC_LOG_DEBUG(("epoll_ctl (DEL): %s", strerror(errno)));
291 task->scheduled = FALSE;
295 /* Schedule the task */
/* First time: ADD the fd with the task pointer as user data (the
   dispatcher in silc_epoll() reads it back from event.data.ptr). */
296 if (silc_unlikely(!task->scheduled)) {
297 event.data.ptr = task;
298 if (epoll_ctl(internal->epfd, EPOLL_CTL_ADD, task->fd, &event)) {
299 SILC_LOG_DEBUG(("epoll_ctl (ADD): %s", strerror(errno)));
302 task->scheduled = TRUE;
306 /* Schedule for specific mask */
/* Already scheduled: MOD the existing registration to the new mask. */
307 event.data.ptr = task;
308 if (epoll_ctl(internal->epfd, EPOLL_CTL_MOD, task->fd, &event)) {
309 SILC_LOG_DEBUG(("epoll_ctl (MOD): %s", strerror(errno)));
312 #endif /* HAVE_EPOLL_WAIT */
/* Fd task callback for the wakeup pipe: drains one byte written by
   silc_schedule_internal_wakeup() so the pipe does not fill up.  The
   read result is deliberately ignored — this is only a wakeup kick. */
318 SILC_TASK_CALLBACK(silc_schedule_wakeup_cb)
320 SilcUnixScheduler internal = (SilcUnixScheduler)context;
323 SILC_LOG_DEBUG(("Wokeup"));
325 (void)read(internal->wakeup_pipe[0], &c, 1);
/* Deferred (timeout-task) initializer that registers the read end of
   the wakeup pipe as an fd task so other threads can wake the
   scheduler.  Runs once, scheduled from silc_schedule_internal_init(). */
328 SILC_TASK_CALLBACK(silc_schedule_wakeup_init)
330 SilcUnixScheduler internal = schedule->internal;
332 internal->wakeup_task =
333 silc_schedule_task_add(schedule, internal->wakeup_pipe[0],
334 silc_schedule_wakeup_cb, internal,
/* Threads still work without the wakeup task, just without the
   cross-thread wakeup optimization — hence only a warning. */
336 if (!internal->wakeup_task) {
337 SILC_LOG_WARNING(("Could not add a wakeup task, threads won't work"));
338 close(internal->wakeup_pipe[0]);
341 silc_schedule_internal_schedule_fd(schedule, internal,
342 (SilcTaskFd)internal->wakeup_task,
345 #endif /* SILC_THREADS */
347 /* Initializes the platform specific scheduler. This for example initializes
348 the wakeup mechanism of the scheduler. In multi-threaded environment
349 the scheduler needs to be woken up when tasks are added or removed from
350 the task queues. Returns context to the platform specific scheduler. */

352 void *silc_schedule_internal_init(SilcSchedule schedule,
355 SilcUnixScheduler internal;
358 internal = silc_calloc(1, sizeof(*internal));
362 #if defined(HAVE_EPOLL_WAIT)
/* The epoll_create() size argument is only a historical hint. */
363 internal->epfd = epoll_create(4);
364 if (internal->epfd < 0) {
365 SILC_LOG_ERROR(("epoll_create() failed: %s", strerror(errno)));
/* Start with a 4-entry event buffer; silc_epoll() grows it on demand. */
368 internal->fds = silc_calloc(4, sizeof(*internal->fds));
369 if (!internal->fds) {
370 close(internal->epfd);
373 internal->fds_count = 4;
374 #elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
/* Size the pollfd table from the fd resource limit; if the caller
   requested a max task count, try to raise the limit to match and
   re-read what the kernel actually granted. */
375 getrlimit(RLIMIT_NOFILE, &internal->nofile);
377 if (schedule->max_tasks > 0) {
378 internal->nofile.rlim_cur = schedule->max_tasks;
379 if (schedule->max_tasks > internal->nofile.rlim_max)
380 internal->nofile.rlim_max = schedule->max_tasks;
381 setrlimit(RLIMIT_NOFILE, &internal->nofile);
382 getrlimit(RLIMIT_NOFILE, &internal->nofile);
383 schedule->max_tasks = internal->nofile.rlim_max;
386 internal->fds = silc_calloc(internal->nofile.rlim_cur,
387 sizeof(*internal->fds));
390 internal->fds_count = internal->nofile.rlim_cur;
391 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
393 sigemptyset(&internal->signals);
/* Threaded builds: create the self-pipe used to wake a blocked
   scheduler; registration of its read end is deferred to the
   silc_schedule_wakeup_init timeout task. */
396 if (pipe(internal->wakeup_pipe)) {
397 SILC_LOG_ERROR(("pipe() fails: %s", strerror(errno)));
402 silc_schedule_task_add_timeout(schedule, silc_schedule_wakeup_init,
404 #endif /* SILC_THREADS */
406 internal->app_context = app_context;
/* Reset the global signal table; schedule backpointers are set so the
   signal handler can flag schedule->signal_tasks. */
408 for (i = 0; i < SIGNAL_COUNT; i++) {
409 signal_call[i].sig = 0;
410 signal_call[i].call = FALSE;
411 signal_call[i].schedule = schedule;
414 return (void *)internal;
/* Forward prototypes; definitions are at the end of this file and are
   used by the signal register/unregister/call functions below. */
417 void silc_schedule_internal_signals_block(SilcSchedule schedule,
419 void silc_schedule_internal_signals_unblock(SilcSchedule schedule,
422 /* Uninitializes the platform specific scheduler context. */

424 void silc_schedule_internal_uninit(SilcSchedule schedule, void *context)
426 SilcUnixScheduler internal = (SilcUnixScheduler)context;
/* Close both ends of the wakeup pipe (threaded builds). */
432 close(internal->wakeup_pipe[0]);
433 close(internal->wakeup_pipe[1]);
/* Release backend resources: the epoll fd and event buffer, or the
   pollfd table, depending on which backend was compiled in. */
436 #if defined(HAVE_EPOLL_WAIT)
437 close(internal->epfd);
438 silc_free(internal->fds);
439 #elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
440 silc_free(internal->fds);
441 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
446 /* Wakes up the scheduler */

448 void silc_schedule_internal_wakeup(SilcSchedule schedule, void *context)
451 SilcUnixScheduler internal = (SilcUnixScheduler)context;
/* Nothing to do if the wakeup pipe task was never registered. */
453 if (!internal || !internal->wakeup_task)
456 SILC_LOG_DEBUG(("Wakeup"));
/* One byte into the self-pipe unblocks the scheduler's poll/select;
   silc_schedule_wakeup_cb drains it.  Result deliberately ignored. */
458 (void)write(internal->wakeup_pipe[1], "!", 1);
/* Actual signal handler.  Only marks the matching signal_call[] entry
   as pending and flags the owning schedule; the registered callback is
   invoked later from silc_schedule_internal_signals_call(), outside
   signal context.  NOTE(review): SILC_LOG_DEBUG here runs inside a
   signal handler and is unlikely to be async-signal-safe — confirm it
   compiles out in production builds. */
464 static void silc_schedule_internal_sighandler(int signal)
468 SILC_LOG_DEBUG(("Start"));
470 for (i = 0; i < SIGNAL_COUNT; i++) {
471 if (signal_call[i].sig == signal) {
472 signal_call[i].call = TRUE;
473 signal_call[i].schedule->signal_tasks = TRUE;
474 SILC_LOG_DEBUG(("Scheduling signal %d to be called",
475 signal_call[i].sig));
/* Registers `callback' for signal `sig': claims a free slot in the
   global signal_call[] table, installs the common sighandler for the
   signal and adds it to the set of signals this scheduler blocks.
   Registered signals are blocked around the table update to avoid
   racing with the handler. */
481 void silc_schedule_internal_signal_register(SilcSchedule schedule,
484 SilcTaskCallback callback,
485 void *callback_context)
487 SilcUnixScheduler internal = (SilcUnixScheduler)context;
493 SILC_LOG_DEBUG(("Registering signal %d", sig));
495 silc_schedule_internal_signals_block(schedule, context);
/* Find the first unused slot (sig == 0 means free). */
497 for (i = 0; i < SIGNAL_COUNT; i++) {
498 if (!signal_call[i].sig) {
499 signal_call[i].sig = sig;
500 signal_call[i].callback = callback;
501 signal_call[i].context = callback_context;
502 signal_call[i].schedule = schedule;
503 signal_call[i].call = FALSE;
/* NOTE(review): signal() semantics vary across platforms (handler
   reset, restart behavior); sigaction() would be more portable. */
504 signal(sig, silc_schedule_internal_sighandler);
509 silc_schedule_internal_signals_unblock(schedule, context);
510 sigaddset(&internal->signals, sig);
/* Unregisters signal `sig': clears its slot(s) in the global table,
   restores the default disposition and removes it from the blocked
   set.  Mirror image of silc_schedule_internal_signal_register(). */
513 void silc_schedule_internal_signal_unregister(SilcSchedule schedule,
517 SilcUnixScheduler internal = (SilcUnixScheduler)context;
523 SILC_LOG_DEBUG(("Unregistering signal %d", sig));
525 silc_schedule_internal_signals_block(schedule, context);
527 for (i = 0; i < SIGNAL_COUNT; i++) {
528 if (signal_call[i].sig == sig) {
529 signal_call[i].sig = 0;
530 signal_call[i].callback = NULL;
531 signal_call[i].context = NULL;
532 signal_call[i].schedule = NULL;
533 signal_call[i].call = FALSE;
534 signal(sig, SIG_DFL);
538 silc_schedule_internal_signals_unblock(schedule, context);
539 sigdelset(&internal->signals, sig);
542 /* Call all signals */

/* Dispatches every signal flagged pending by the signal handler:
   invokes the registered callback outside signal context and clears
   the pending flag. */
544 void silc_schedule_internal_signals_call(SilcSchedule schedule, void *context)
546 SilcUnixScheduler internal = (SilcUnixScheduler)context;
549 SILC_LOG_DEBUG(("Start"));
554 silc_schedule_internal_signals_block(schedule, context);
556 for (i = 0; i < SIGNAL_COUNT; i++) {
557 if (signal_call[i].call &&
558 signal_call[i].callback) {
559 SILC_LOG_DEBUG(("Calling signal %d callback",
560 signal_call[i].sig));
/* Signals are unblocked around the user callback so it does not run
   with the scheduler's signals masked, then re-blocked to keep the
   table scan safe against concurrent handler updates. */
561 silc_schedule_internal_signals_unblock(schedule, context);
562 signal_call[i].callback(schedule, internal->app_context,
565 signal_call[i].context);
566 signal_call[i].call = FALSE;
567 silc_schedule_internal_signals_block(schedule, context);
571 silc_schedule_internal_signals_unblock(schedule, context);
574 /* Block registered signals in scheduler. */

576 void silc_schedule_internal_signals_block(SilcSchedule schedule, void *context)
578 SilcUnixScheduler internal = (SilcUnixScheduler)context;
/* Block every registered signal; the previous mask is saved in
   internal->signals_blocked for the matching unblock call. */
583 sigprocmask(SIG_BLOCK, &internal->signals, &internal->signals_blocked);
586 /* Unblock registered signals in schedule. */

588 void silc_schedule_internal_signals_unblock(SilcSchedule schedule,
591 SilcUnixScheduler internal = (SilcUnixScheduler)context;
/* Restore the mask saved by silc_schedule_internal_signals_block();
   NOTE(review): not re-entrant — a nested block/unblock pair would
   clobber signals_blocked. */
596 sigprocmask(SIG_SETMASK, &internal->signals_blocked, NULL);
/* Platform scheduler operations table consumed by the generic
   scheduler core; the select entry between init/uninit and
   schedule_fd is chosen by the same #if ladder as the backends
   above (epoll, poll or select). */
599 const SilcScheduleOps schedule_ops =
601 silc_schedule_internal_init,
602 silc_schedule_internal_uninit,
603 #if defined(HAVE_EPOLL_WAIT)
605 #elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
609 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
610 silc_schedule_internal_schedule_fd,
611 silc_schedule_internal_wakeup,
612 silc_schedule_internal_signal_register,
613 silc_schedule_internal_signal_unregister,
614 silc_schedule_internal_signals_call,
615 silc_schedule_internal_signals_block,
616 silc_schedule_internal_signals_unblock,