/*

  Author: Pekka Riikonen <priikone@silcnet.org>

  Copyright (C) 1998 - 2005 Pekka Riikonen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; version 2 of the License.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

*/
#include "silcincludes.h"
#include "silcschedule_i.h"

#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
#include <poll.h>
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */

/* Forward declaration; the operations table is defined at the end of
   this file. */
const SilcScheduleOps schedule_ops;

#define SIGNAL_COUNT 32

/* Deferred signal delivery record. */
typedef struct {
  SilcUInt32 signal;
  SilcTaskCallback callback;
  void *context;
  bool call;
} SilcUnixSignal;

/* Internal context. */
typedef struct {
#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
  struct rlimit nofile;
  struct pollfd *fds;
  SilcUInt32 fds_count;
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
  void *app_context;
  int wakeup_pipe[2];
  SilcTask wakeup_task;
  sigset_t signals;
  sigset_t signals_blocked;
  SilcUnixSignal signal_call[SIGNAL_COUNT];
} *SilcUnixScheduler;
#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)

/* Calls normal poll() system call. */

int silc_poll(SilcSchedule schedule, void *context)
{
  SilcUnixScheduler internal = context;
  SilcHashTableList htl;
  SilcTaskFd task;
  struct pollfd *fds = internal->fds;
  SilcUInt32 fds_count = internal->fds_count;
  int fd, ret, i = 0, timeout = -1;

  silc_hash_table_list(schedule->fd_queue, &htl);
  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
    if (!task->events)
      continue;

    /* Allocate larger fd table if needed */
    if (i >= fds_count) {
      fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
                         (fds_count + (fds_count / 2)));
      if (!fds)
        break;
      internal->fds = fds;
      internal->fds_count = fds_count = fds_count + (fds_count / 2);
      internal->nofile.rlim_cur = fds_count;
      if (fds_count > internal->nofile.rlim_max)
        internal->nofile.rlim_max = fds_count;
      if (setrlimit(RLIMIT_NOFILE, &internal->nofile) < 0)
        break;
    }

    fds[i].fd = fd;
    fds[i].events = 0;
    task->revents = fds[i].revents = 0;

    if (task->events & SILC_TASK_READ)
      fds[i].events |= (POLLIN | POLLPRI);
    if (task->events & SILC_TASK_WRITE)
      fds[i].events |= POLLOUT;
    i++;
  }
  silc_hash_table_list_reset(&htl);

  if (schedule->has_timeout)
    timeout = ((schedule->timeout.tv_sec * 1000) +
               (schedule->timeout.tv_usec / 1000));

  /* Poll only the entries filled above, not the whole table. */
  fds_count = i;

  SILC_SCHEDULE_UNLOCK(schedule);
  ret = poll(fds, fds_count, timeout);
  SILC_SCHEDULE_LOCK(schedule);
  if (ret <= 0)
    return ret;

  for (i = 0; i < fds_count; i++) {
    if (!fds[i].revents)
      continue;
    if (!silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fds[i].fd),
                              NULL, (void **)&task))
      continue;

    fd = fds[i].revents;
    if (fd & (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLNVAL))
      task->revents |= SILC_TASK_READ;
    if (fd & POLLOUT)
      task->revents |= SILC_TASK_WRITE;
  }

  return ret;
}
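/* Note on the mapping above: POLLERR, POLLHUP and POLLNVAL are folded into
   SILC_TASK_READ so that the task callback issues a read, sees the error or
   EOF there, and can tear the connection down.  The timeout conversion is
   needed because poll() takes milliseconds while the scheduler keeps a
   struct timeval:

     timeout_ms = tv_sec * 1000 + tv_usec / 1000

   which means a timeout below one millisecond truncates to 0 and poll()
   returns immediately instead of waiting. */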
#else

/* Calls normal select() system call. */

int silc_select(SilcSchedule schedule, void *context)
{
  SilcHashTableList htl;
  SilcTaskFd task;
  fd_set in, out;
  int fd, max_fd = 0, ret;

  FD_ZERO(&in);
  FD_ZERO(&out);

  silc_hash_table_list(schedule->fd_queue, &htl);
  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
    if (!task->events)
      continue;

#ifdef FD_SETSIZE
    if (fd >= FD_SETSIZE)
      break;
#endif /* FD_SETSIZE */

    if (fd > max_fd)
      max_fd = fd;

    if (task->events & SILC_TASK_READ)
      FD_SET(fd, &in);
    if (task->events & SILC_TASK_WRITE)
      FD_SET(fd, &out);

    task->revents = 0;
  }
  silc_hash_table_list_reset(&htl);

  SILC_SCHEDULE_UNLOCK(schedule);
  ret = select(max_fd + 1, &in, &out, NULL, (schedule->has_timeout ?
                                             &schedule->timeout : NULL));
  SILC_SCHEDULE_LOCK(schedule);
  if (ret <= 0)
    return ret;

  silc_hash_table_list(schedule->fd_queue, &htl);
  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
    if (!task->events)
      continue;

#ifdef FD_SETSIZE
    if (fd >= FD_SETSIZE)
      break;
#endif /* FD_SETSIZE */

    if (FD_ISSET(fd, &in))
      task->revents |= SILC_TASK_READ;
    if (FD_ISSET(fd, &out))
      task->revents |= SILC_TASK_WRITE;
  }
  silc_hash_table_list_reset(&htl);

  return ret;
}

#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
#ifdef SILC_THREADS

/* Drains the wakeup byte written by silc_schedule_internal_wakeup. */

SILC_TASK_CALLBACK(silc_schedule_wakeup_cb)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;
  unsigned char c;

  SILC_LOG_DEBUG(("Woke up"));

  read(internal->wakeup_pipe[0], &c, 1);
}

#endif /* SILC_THREADS */
/* Initializes the platform specific scheduler.  This for example initializes
   the wakeup mechanism of the scheduler.  In a multi-threaded environment
   the scheduler needs to be woken up when tasks are added or removed from
   the task queues.  Returns a context to the platform specific scheduler. */

void *silc_schedule_internal_init(SilcSchedule schedule,
                                  void *app_context)
{
  SilcUnixScheduler internal;

  internal = silc_calloc(1, sizeof(*internal));
  if (!internal)
    return NULL;

#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
  getrlimit(RLIMIT_NOFILE, &internal->nofile);

  if (schedule->max_tasks > 0) {
    internal->nofile.rlim_cur = schedule->max_tasks;
    if (schedule->max_tasks > internal->nofile.rlim_max)
      internal->nofile.rlim_max = schedule->max_tasks;
    setrlimit(RLIMIT_NOFILE, &internal->nofile);
    getrlimit(RLIMIT_NOFILE, &internal->nofile);
    schedule->max_tasks = internal->nofile.rlim_max;
  }

  internal->fds = silc_calloc(internal->nofile.rlim_cur,
                              sizeof(*internal->fds));
  if (!internal->fds) {
    silc_free(internal);
    return NULL;
  }
  internal->fds_count = internal->nofile.rlim_cur;
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */

  sigemptyset(&internal->signals);

#ifdef SILC_THREADS
  if (pipe(internal->wakeup_pipe)) {
    SILC_LOG_ERROR(("pipe() fails: %s", strerror(errno)));
    silc_free(internal);
    return NULL;
  }

  internal->wakeup_task =
    silc_schedule_task_add(schedule, internal->wakeup_pipe[0],
                           silc_schedule_wakeup_cb, internal,
                           0, 0, SILC_TASK_FD, SILC_TASK_PRI_NORMAL);
  if (!internal->wakeup_task) {
    SILC_LOG_ERROR(("Could not add a wakeup task, threads won't work"));
    close(internal->wakeup_pipe[0]);
    close(internal->wakeup_pipe[1]);
    silc_free(internal);
    return NULL;
  }
#endif /* SILC_THREADS */

  internal->app_context = app_context;

  return (void *)internal;
}
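/* A minimal usage sketch, assuming the public silcschedule.h API (the names
   below are the generic wrappers, not defined in this file); the wrappers
   reach silc_schedule_internal_init() through the schedule_ops table at the
   end of this file:

     SilcSchedule schedule;

     schedule = silc_schedule_init(0, app_context);
     if (!schedule)
       exit(1);

     ... register fd, timeout and signal tasks with silc_schedule_task_add ...

     silc_schedule(schedule);        // run the scheduler main loop
     silc_schedule_uninit(schedule);
*/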
void silc_schedule_internal_signals_block(SilcSchedule schedule,
                                          void *context);
void silc_schedule_internal_signals_unblock(SilcSchedule schedule,
                                            void *context);
/* Uninitializes the platform specific scheduler context. */

void silc_schedule_internal_uninit(SilcSchedule schedule, void *context)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  if (!internal)
    return;

#ifdef SILC_THREADS
  close(internal->wakeup_pipe[0]);
  close(internal->wakeup_pipe[1]);
#endif /* SILC_THREADS */

#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
  silc_free(internal->fds);
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */

  silc_free(internal);
}
/* Wakes up the scheduler */

void silc_schedule_internal_wakeup(SilcSchedule schedule, void *context)
{
#ifdef SILC_THREADS
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  if (!internal)
    return;

  SILC_LOG_DEBUG(("Wakeup"));

  write(internal->wakeup_pipe[1], "!", 1);
#endif /* SILC_THREADS */
}
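/* The write above is the classic self-pipe trick: a thread that adds or
   removes tasks writes a single byte to wakeup_pipe[1], which makes the
   poll()/select() blocking in another thread return, and
   silc_schedule_wakeup_cb then drains the byte from wakeup_pipe[0].  A
   standalone sketch of the same pattern in plain POSIX:

     int p[2];
     pipe(p);              // p[0] is added to the watched fd set

     write(p[1], "!", 1);  // from any thread: wake the poller

     unsigned char c;
     read(p[0], &c, 1);    // in the poller: consume the wakeup byte
*/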
/* Registers a signal and its callback for later delivery. */

void silc_schedule_internal_signal_register(SilcSchedule schedule,
                                            void *context,
                                            SilcUInt32 signal,
                                            SilcTaskCallback callback,
                                            void *callback_context)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;
  int i;

  if (!internal)
    return;

  SILC_LOG_DEBUG(("Registering signal %d", signal));

  silc_schedule_internal_signals_block(schedule, context);

  for (i = 0; i < SIGNAL_COUNT; i++) {
    if (!internal->signal_call[i].signal) {
      internal->signal_call[i].signal = signal;
      internal->signal_call[i].callback = callback;
      internal->signal_call[i].context = callback_context;
      internal->signal_call[i].call = FALSE;
      break;
    }
  }

  silc_schedule_internal_signals_unblock(schedule, context);
  sigaddset(&internal->signals, signal);
}
/* Unregisters a previously registered signal callback. */

void silc_schedule_internal_signal_unregister(SilcSchedule schedule,
                                              void *context,
                                              SilcUInt32 signal,
                                              SilcTaskCallback callback,
                                              void *callback_context)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;
  int i;

  if (!internal)
    return;

  SILC_LOG_DEBUG(("Unregistering signal %d", signal));

  silc_schedule_internal_signals_block(schedule, context);

  for (i = 0; i < SIGNAL_COUNT; i++) {
    if (internal->signal_call[i].signal == signal &&
        internal->signal_call[i].callback == callback &&
        internal->signal_call[i].context == callback_context) {
      internal->signal_call[i].signal = 0;
      internal->signal_call[i].callback = NULL;
      internal->signal_call[i].context = NULL;
      internal->signal_call[i].call = FALSE;
    }
  }

  silc_schedule_internal_signals_unblock(schedule, context);
  sigdelset(&internal->signals, signal);
}
/* Mark signal to be called later. */

void silc_schedule_internal_signal_call(SilcSchedule schedule,
                                        void *context, SilcUInt32 signal)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;
  int i;

  if (!internal)
    return;

  silc_schedule_internal_signals_block(schedule, context);

  for (i = 0; i < SIGNAL_COUNT; i++) {
    if (internal->signal_call[i].signal == signal) {
      internal->signal_call[i].call = TRUE;
      SILC_LOG_DEBUG(("Scheduling signal %d to be called",
                      internal->signal_call[i].signal));
    }
  }

  silc_schedule_internal_signals_unblock(schedule, context);
}
/* Call all signals */

void silc_schedule_internal_signals_call(SilcSchedule schedule, void *context)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;
  int i;

  SILC_LOG_DEBUG(("Start"));

  if (!internal)
    return;

  silc_schedule_internal_signals_block(schedule, context);

  for (i = 0; i < SIGNAL_COUNT; i++) {
    if (internal->signal_call[i].call &&
        internal->signal_call[i].callback) {
      SILC_LOG_DEBUG(("Calling signal %d callback",
                      internal->signal_call[i].signal));
      internal->signal_call[i].callback(schedule, internal->app_context,
                                        SILC_TASK_INTERRUPT,
                                        internal->signal_call[i].signal,
                                        internal->signal_call[i].context);
      internal->signal_call[i].call = FALSE;
    }
  }

  silc_schedule_internal_signals_unblock(schedule, context);
}
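/* The mark-then-call split keeps the actual work out of signal context: a
   signal handler only reaches silc_schedule_internal_signal_call(), which
   sets the `call' flag, and the scheduler main loop later delivers the
   callback through silc_schedule_internal_signals_call() above.  A hedged
   sketch of registering a handler through the assumed public wrapper
   silc_schedule_signal_register() from silcschedule.h:

     static void sigint_cb(SilcSchedule schedule, void *app_context,
                           SilcTaskEvent type, SilcUInt32 signal,
                           void *context)
     {
       // runs from the scheduler loop, not from signal context
     }

     silc_schedule_signal_register(schedule, SIGINT, sigint_cb, NULL);
*/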
/* Block registered signals in scheduler. */

void silc_schedule_internal_signals_block(SilcSchedule schedule, void *context)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  if (!internal)
    return;

  sigprocmask(SIG_BLOCK, &internal->signals, &internal->signals_blocked);
}
/* Unblock registered signals in scheduler. */

void silc_schedule_internal_signals_unblock(SilcSchedule schedule,
                                            void *context)
{
  SilcUnixScheduler internal = (SilcUnixScheduler)context;

  if (!internal)
    return;

  sigprocmask(SIG_SETMASK, &internal->signals_blocked, NULL);
}
/* Platform specific scheduler operations, used by the generic scheduler. */

const SilcScheduleOps schedule_ops =
{
  silc_schedule_internal_init,
  silc_schedule_internal_uninit,
#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
  silc_poll,
#else
  silc_select,
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
  silc_schedule_internal_wakeup,
  silc_schedule_internal_signal_register,
  silc_schedule_internal_signal_unregister,
  silc_schedule_internal_signal_call,
  silc_schedule_internal_signals_call,
  silc_schedule_internal_signals_block,
  silc_schedule_internal_signals_unblock,
};