/*
  Author: Pekka Riikonen <priikone@silcnet.org>

  Copyright (C) 1998 - 2006 Pekka Riikonen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; version 2 of the License.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.
*/

#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
#include <poll.h>
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */

const SilcScheduleOps schedule_ops;

/* Internal context. */
#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
  sigset_t signals_blocked;

  SilcTaskCallback callback;
  SilcSchedule schedule;

#define SIGNAL_COUNT 32
SilcUnixSignal signal_call[SIGNAL_COUNT];
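
/* The signal_call table above is a file scope array rather than a member of
   the per-scheduler context, so it is shared by every scheduler in the
   process.  Each slot records the signal number, the registered callback and
   its context, the owning schedule, and a "call" flag that the signal
   handler sets when the signal is delivered.  SIGNAL_COUNT bounds how many
   distinct signals can be registered at one time. */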

#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)

/* Calls normal poll() system call. */

int silc_poll(SilcSchedule schedule, void *context)
  SilcUnixScheduler internal = context;
  SilcHashTableList htl;
  struct pollfd *fds = internal->fds;
  SilcUInt32 fds_count = internal->fds_count;
  int fd, ret, i = 0, timeout = -1;

  silc_hash_table_list(schedule->fd_queue, &htl);
  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {

    /* Allocate larger fd table if needed */
    if (i >= fds_count) {
      fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
                         (fds_count + (fds_count / 2)));
      if (silc_unlikely(!fds))
        break;
      internal->fds = fds;
      internal->fds_count = fds_count = fds_count + (fds_count / 2);
      internal->nofile.rlim_cur = fds_count;
      if (fds_count > internal->nofile.rlim_max)
        internal->nofile.rlim_max = fds_count;
      if (setrlimit(RLIMIT_NOFILE, &internal->nofile) < 0)
        break;
    }

    task->revents = fds[i].revents = 0;

    if (task->events & SILC_TASK_READ)
      fds[i].events |= (POLLIN | POLLPRI);
    if (task->events & SILC_TASK_WRITE)
      fds[i].events |= POLLOUT;
  }
  silc_hash_table_list_reset(&htl);

  if (schedule->has_timeout)
    timeout = ((schedule->timeout.tv_sec * 1000) +
               (schedule->timeout.tv_usec / 1000));
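
  /* Note: poll() takes its timeout in milliseconds, so the scheduler's
     struct timeval timeout is converted above; when no timeout is set the
     initial value of -1 makes poll() block until a descriptor is ready. */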

  SILC_SCHEDULE_UNLOCK(schedule);
  ret = poll(fds, fds_count, timeout);
  SILC_SCHEDULE_LOCK(schedule);

  for (i = 0; i < fds_count; i++) {
    if (!silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fds[i].fd),
                              NULL, (void **)&task))
      continue;

    if (fds[i].revents & (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLNVAL))
      task->revents |= SILC_TASK_READ;
    if (fds[i].revents & POLLOUT)
      task->revents |= SILC_TASK_WRITE;

/* Calls normal select() system call. */

int silc_select(SilcSchedule schedule, void *context)
  SilcHashTableList htl;
  int fd, max_fd = 0, ret;

  silc_hash_table_list(schedule->fd_queue, &htl);
  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
#ifdef FD_SETSIZE
    if (fd >= FD_SETSIZE)
      continue;
#endif /* FD_SETSIZE */
    if (task->events & SILC_TASK_READ)
      FD_SET(fd, &in);
    if (task->events & SILC_TASK_WRITE)
      FD_SET(fd, &out);
  }
  silc_hash_table_list_reset(&htl);

  SILC_SCHEDULE_UNLOCK(schedule);
  ret = select(max_fd + 1, &in, &out, NULL, (schedule->has_timeout ?
                                             &schedule->timeout : NULL));
  SILC_SCHEDULE_LOCK(schedule);

  silc_hash_table_list(schedule->fd_queue, &htl);
  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
#ifdef FD_SETSIZE
    if (fd >= FD_SETSIZE)
      continue;
#endif /* FD_SETSIZE */
    if (FD_ISSET(fd, &in))
      task->revents |= SILC_TASK_READ;
    if (FD_ISSET(fd, &out))
      task->revents |= SILC_TASK_WRITE;
  }
  silc_hash_table_list_reset(&htl);

#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
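
/* Only one of the two backends above ends up being used: the poll()
   implementation when HAVE_POLL, HAVE_SETRLIMIT and RLIMIT_NOFILE are all
   available, otherwise the select() implementation.  The same preprocessor
   guard appears in the schedule_ops table at the end of this file. */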

#if defined(SILC_THREADS)

SILC_TASK_CALLBACK(silc_schedule_wakeup_cb)
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  SILC_LOG_DEBUG(("Woke up"));
  read(internal->wakeup_pipe[0], &c, 1);

#endif /* SILC_THREADS */

/* Initializes the platform specific scheduler.  This initializes, for
   example, the scheduler's wakeup mechanism.  In a multi-threaded
   environment the scheduler needs to be woken up whenever tasks are added
   to or removed from the task queues.  Returns the platform specific
   scheduler context. */

void *silc_schedule_internal_init(SilcSchedule schedule,
                                  void *app_context)
  SilcUnixScheduler internal;

  internal = silc_calloc(1, sizeof(*internal));

#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
  getrlimit(RLIMIT_NOFILE, &internal->nofile);

  if (schedule->max_tasks > 0) {
    internal->nofile.rlim_cur = schedule->max_tasks;
    if (schedule->max_tasks > internal->nofile.rlim_max)
      internal->nofile.rlim_max = schedule->max_tasks;
    setrlimit(RLIMIT_NOFILE, &internal->nofile);
    getrlimit(RLIMIT_NOFILE, &internal->nofile);
    schedule->max_tasks = internal->nofile.rlim_max;
  }

  internal->fds = silc_calloc(internal->nofile.rlim_cur,
                              sizeof(*internal->fds));
  internal->fds_count = internal->nofile.rlim_cur;
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
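
  /* With the poll() backend the pollfd array is sized from the process's
     RLIMIT_NOFILE limit (raised above to schedule->max_tasks when that is
     set), so there is a slot for every descriptor the process can open.
     silc_poll() still grows the array on demand if it runs out of slots. */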

  sigemptyset(&internal->signals);

  if (pipe(internal->wakeup_pipe)) {
    SILC_LOG_ERROR(("pipe() fails: %s", strerror(errno)));

  internal->wakeup_task =
    silc_schedule_task_add(schedule, internal->wakeup_pipe[0],
                           silc_schedule_wakeup_cb, internal,
  if (!internal->wakeup_task) {
    SILC_LOG_ERROR(("Could not add a wakeup task, threads won't work"));
    close(internal->wakeup_pipe[0]);
    close(internal->wakeup_pipe[1]);

  internal->app_context = app_context;

  for (i = 0; i < SIGNAL_COUNT; i++) {
    signal_call[i].sig = 0;
    signal_call[i].call = FALSE;
    signal_call[i].schedule = schedule;
  }

  return (void *)internal;

void silc_schedule_internal_signals_block(SilcSchedule schedule,
                                          void *context);
void silc_schedule_internal_signals_unblock(SilcSchedule schedule,
                                            void *context);

/* Uninitializes the platform specific scheduler context. */

void silc_schedule_internal_uninit(SilcSchedule schedule, void *context)
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  close(internal->wakeup_pipe[0]);
  close(internal->wakeup_pipe[1]);

#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
  silc_free(internal->fds);
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */

/* Wakes up the scheduler */

void silc_schedule_internal_wakeup(SilcSchedule schedule, void *context)
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  SILC_LOG_DEBUG(("Wakeup"));

  write(internal->wakeup_pipe[1], "!", 1);
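
  /* The single byte written above makes the read end of the wakeup pipe
     readable, so a poll()/select() call sleeping in another thread returns
     and the scheduler re-evaluates its task queues; silc_schedule_wakeup_cb
     then reads the byte to drain the pipe. */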

static void silc_schedule_internal_sighandler(int signal)
  for (i = 0; i < SIGNAL_COUNT; i++) {
    if (signal_call[i].sig == signal) {
      signal_call[i].call = TRUE;
      signal_call[i].schedule->signal_tasks = TRUE;
      SILC_LOG_DEBUG(("Scheduling signal %d to be called",
                      signal_call[i].sig));
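
/* Note on the handler above: it only marks the matching signal_call entry
   and sets the owning schedule's signal_tasks flag; it never calls back into
   the scheduler from signal context.  The registered callbacks are delivered
   later by silc_schedule_internal_signals_call(). */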

void silc_schedule_internal_signal_register(SilcSchedule schedule,
                                            SilcTaskCallback callback,
                                            void *callback_context)
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  SILC_LOG_DEBUG(("Registering signal %d", sig));

  silc_schedule_internal_signals_block(schedule, context);

  for (i = 0; i < SIGNAL_COUNT; i++) {
    if (!signal_call[i].sig) {
      signal_call[i].sig = sig;
      signal_call[i].callback = callback;
      signal_call[i].context = callback_context;
      signal_call[i].call = FALSE;
      signal(sig, silc_schedule_internal_sighandler);
      break;
    }
  }

  silc_schedule_internal_signals_unblock(schedule, context);
  sigaddset(&internal->signals, sig);

void silc_schedule_internal_signal_unregister(SilcSchedule schedule,
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  SILC_LOG_DEBUG(("Unregistering signal %d", sig));

  silc_schedule_internal_signals_block(schedule, context);

  for (i = 0; i < SIGNAL_COUNT; i++) {
    if (signal_call[i].sig == sig) {
      signal_call[i].sig = 0;
      signal_call[i].callback = NULL;
      signal_call[i].context = NULL;
      signal_call[i].call = FALSE;
      signal(sig, SIG_DFL);
    }
  }

  silc_schedule_internal_signals_unblock(schedule, context);
  sigdelset(&internal->signals, sig);

/* Call all signals */

void silc_schedule_internal_signals_call(SilcSchedule schedule, void *context)
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  SILC_LOG_DEBUG(("Start"));

  silc_schedule_internal_signals_block(schedule, context);

  for (i = 0; i < SIGNAL_COUNT; i++) {
    if (signal_call[i].call &&
        signal_call[i].callback) {
      SILC_LOG_DEBUG(("Calling signal %d callback",
                      signal_call[i].sig));
      signal_call[i].callback(schedule, internal->app_context,
                              signal_call[i].context);
      signal_call[i].call = FALSE;
    }
  }

  silc_schedule_internal_signals_unblock(schedule, context);
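
  /* The callbacks above run with the registered signals blocked (see the
     block/unblock pair), so the signal handler cannot fire and modify
     signal_call[] in the middle of this loop. */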

/* Block registered signals in scheduler. */

void silc_schedule_internal_signals_block(SilcSchedule schedule, void *context)
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  sigprocmask(SIG_BLOCK, &internal->signals, &internal->signals_blocked);

/* Unblock registered signals in scheduler. */

void silc_schedule_internal_signals_unblock(SilcSchedule schedule,
                                            void *context)
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  sigprocmask(SIG_SETMASK, &internal->signals_blocked, NULL);
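
  /* Block/unblock act as a save/restore pair: SIG_BLOCK stores the mask that
     was in effect into signals_blocked, and SIG_SETMASK restores it, so any
     signals the application had blocked before remain blocked afterwards. */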

const SilcScheduleOps schedule_ops =
{
  silc_schedule_internal_init,
  silc_schedule_internal_uninit,
#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
  silc_poll,
#else
  silc_select,
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
  silc_schedule_internal_wakeup,
  silc_schedule_internal_signal_register,
  silc_schedule_internal_signal_unregister,
  silc_schedule_internal_signals_call,
  silc_schedule_internal_signals_block,
  silc_schedule_internal_signals_unblock,