diff --git a/lib/silcutil/unix/silcunixschedule.c b/lib/silcutil/unix/silcunixschedule.c
index c0393c73..08563587 100644
--- a/lib/silcutil/unix/silcunixschedule.c
+++ b/lib/silcutil/unix/silcunixschedule.c
@@ -4,7 +4,7 @@
 
   Author: Pekka Riikonen <priikone@silcnet.org>
 
-  Copyright (C) 1998 - 2005 Pekka Riikonen
+  Copyright (C) 1998 - 2007 Pekka Riikonen
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
@@ -18,26 +18,23 @@
 
 */
 /* $Id$ */
 
-#include "silcincludes.h"
+#include "silc.h"
 
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+#include <sys/epoll.h>
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
 #include <sys/resource.h>
 #endif
 
 const SilcScheduleOps schedule_ops;
 
-#define SIGNAL_COUNT 32
-
-typedef struct {
-  SilcUInt32 signal;
-  SilcTaskCallback callback;
-  void *context;
-  SilcBool call;
-} SilcUnixSignal;
-
 /* Internal context. */
 typedef struct {
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+  struct epoll_event *fds;
+  SilcUInt32 fds_count;
+  int epfd;
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
   struct rlimit nofile;
   struct pollfd *fds;
   SilcUInt32 fds_count;
@@ -47,10 +44,72 @@ typedef struct {
   SilcTask wakeup_task;
   sigset_t signals;
   sigset_t signals_blocked;
-  SilcUnixSignal signal_call[SIGNAL_COUNT];
 } *SilcUnixScheduler;
 
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+typedef struct {
+  SilcUInt32 sig;
+  SilcTaskCallback callback;
+  void *context;
+  SilcBool call;
+  SilcSchedule schedule;
+} SilcUnixSignal;
+
+#define SIGNAL_COUNT 32
+SilcUnixSignal signal_call[SIGNAL_COUNT];
+
+#if defined(HAVE_EPOLL_WAIT)
+
+/* Linux's fast epoll system (level triggered) */
+
+int silc_epoll(SilcSchedule schedule, void *context)
+{
+  SilcUnixScheduler internal = context;
+  SilcTaskFd task;
+  struct epoll_event *fds = internal->fds;
+  SilcUInt32 fds_count = internal->fds_count;
+  int ret, i, timeout = -1;
+
+  /* Allocate larger fd table if needed */
+  i = silc_hash_table_count(schedule->fd_queue);
+  if (i > fds_count) {
+    fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
+		       (fds_count + (i / 2)));
+    if (silc_likely(fds)) {
+      internal->fds = fds;
+      internal->fds_count = fds_count = fds_count + (i / 2);
+    }
+  }
+
+  if (schedule->has_timeout)
+    timeout = ((schedule->timeout.tv_sec * 1000) +
+	       (schedule->timeout.tv_usec / 1000));
+
+  SILC_SCHEDULE_UNLOCK(schedule);
+  ret = epoll_wait(internal->epfd, fds, fds_count, timeout);
+  SILC_SCHEDULE_LOCK(schedule);
+  if (ret <= 0)
+    return ret;
+
+  silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
+
+  for (i = 0; i < ret; i++) {
+    task = fds[i].data.ptr;
+    task->revents = 0;
+    if (!task->header.valid || !task->events) {
+      epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &fds[i]);
+      continue;
+    }
+    if (fds[i].events & (EPOLLIN | EPOLLPRI | EPOLLHUP | EPOLLERR))
+      task->revents |= SILC_TASK_READ;
+    if (fds[i].events & EPOLLOUT)
+      task->revents |= SILC_TASK_WRITE;
+    silc_list_add(schedule->fd_dispatch, task);
+  }
+
+  return ret;
+}
+
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
 
 /* Calls normal poll() system call. */
 
@@ -62,11 +121,13 @@ int silc_poll(SilcSchedule schedule, void *context)
   struct pollfd *fds = internal->fds;
   SilcUInt32 fds_count = internal->fds_count;
   int fd, ret, i = 0, timeout = -1;
+  void *fdp;
 
   silc_hash_table_list(schedule->fd_queue, &htl);
-  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
+  while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
     if (!task->events)
       continue;
+    fd = SILC_PTR_TO_32(fdp);
 
     /* Allocate larger fd table if needed */
     if (i >= fds_count) {
@@ -74,7 +135,7 @@ int silc_poll(SilcSchedule schedule, void *context)
       fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
 			 (fds_count + (fds_count / 2)));
-      if (!fds)
+      if (silc_unlikely(!fds))
 	break;
       internal->fds = fds;
       internal->fds_count = fds_count = fds_count + (fds_count / 2);
@@ -96,6 +157,7 @@ int silc_poll(SilcSchedule schedule, void *context)
     i++;
   }
   silc_hash_table_list_reset(&htl);
+  silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
 
   if (schedule->has_timeout)
     timeout = ((schedule->timeout.tv_sec * 1000) +
@@ -112,7 +174,9 @@ int silc_poll(SilcSchedule schedule, void *context)
     if (!fds[i].revents)
       continue;
     if (!silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fds[i].fd),
-			      NULL, (void **)&task))
+			      NULL, (void *)&task))
+      continue;
+    if (!task->header.valid || !task->events)
       continue;
 
     fd = fds[i].revents;
@@ -120,6 +184,7 @@ int silc_poll(SilcSchedule schedule, void *context)
       task->revents |= SILC_TASK_READ;
     if (fd & POLLOUT)
       task->revents |= SILC_TASK_WRITE;
+    silc_list_add(schedule->fd_dispatch, task);
   }
 
   return ret;
@@ -135,14 +200,16 @@ int silc_select(SilcSchedule schedule, void *context)
   SilcTaskFd task;
   fd_set in, out;
   int fd, max_fd = 0, ret;
+  void *fdp;
 
   FD_ZERO(&in);
   FD_ZERO(&out);
 
   silc_hash_table_list(schedule->fd_queue, &htl);
-  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
+  while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
     if (!task->events)
       continue;
+    fd = SILC_PTR_TO_32(fdp);
 
 #ifdef FD_SETSIZE
     if (fd >= FD_SETSIZE)
@@ -160,6 +227,7 @@ int silc_select(SilcSchedule schedule, void *context)
     task->revents = 0;
   }
   silc_hash_table_list_reset(&htl);
+  silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
 
   SILC_SCHEDULE_UNLOCK(schedule);
   ret = select(max_fd + 1, &in, &out, NULL, (schedule->has_timeout ?
@@ -169,9 +237,10 @@ int silc_select(SilcSchedule schedule, void *context)
     return ret;
 
   silc_hash_table_list(schedule->fd_queue, &htl);
-  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
-    if (!task->events)
+  while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
+    if (!task->header.valid || !task->events)
       continue;
+    fd = SILC_PTR_TO_32(fdp);
 
 #ifdef FD_SETSIZE
     if (fd >= FD_SETSIZE)
@@ -182,6 +251,7 @@ int silc_select(SilcSchedule schedule, void *context)
       task->revents |= SILC_TASK_READ;
     if (FD_ISSET(fd, &out))
       task->revents |= SILC_TASK_WRITE;
+    silc_list_add(schedule->fd_dispatch, task);
   }
   silc_hash_table_list_reset(&htl);
 
@@ -190,6 +260,58 @@ int silc_select(SilcSchedule schedule, void *context)
 
 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
 
+/* Schedule `task' with events `event_mask'. Zero `event_mask' unschedules. */
+
+SilcBool silc_schedule_internal_schedule_fd(SilcSchedule schedule,
+					    void *context,
+					    SilcTaskFd task,
+					    SilcTaskEvent event_mask)
+{
+#if defined(HAVE_EPOLL_WAIT)
+  SilcUnixScheduler internal = (SilcUnixScheduler)context;
+  struct epoll_event event;
+
+  if (!internal)
+    return TRUE;
+
+  SILC_LOG_DEBUG(("Scheduling fd %lu, mask %x", task->fd, event_mask));
+
+  memset(&event, 0, sizeof(event));
+  if (event_mask & SILC_TASK_READ)
+    event.events |= (EPOLLIN | EPOLLPRI);
+  if (event_mask & SILC_TASK_WRITE)
+    event.events |= EPOLLOUT;
+
+  /* Zero mask unschedules task */
+  if (silc_unlikely(!event.events)) {
+    if (epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &event)) {
+      SILC_LOG_DEBUG(("epoll_ctl (DEL): %s", strerror(errno)));
+      return FALSE;
+    }
+    return TRUE;
+  }
+
+  /* Schedule the task */
+  if (silc_unlikely(!task->scheduled)) {
+    event.data.ptr = task;
+    if (epoll_ctl(internal->epfd, EPOLL_CTL_ADD, task->fd, &event)) {
+      SILC_LOG_DEBUG(("epoll_ctl (ADD): %s", strerror(errno)));
+      return FALSE;
+    }
+    task->scheduled = TRUE;
+    return TRUE;
+  }
+
+  /* Schedule for specific mask */
+  event.data.ptr = task;
+  if (epoll_ctl(internal->epfd, EPOLL_CTL_MOD, task->fd, &event)) {
+    SILC_LOG_DEBUG(("epoll_ctl (MOD): %s", strerror(errno)));
+    return FALSE;
+  }
+#endif /* HAVE_EPOLL_WAIT */
+  return TRUE;
+}
+
 #ifdef SILC_THREADS
 
 SILC_TASK_CALLBACK(silc_schedule_wakeup_cb)
@@ -199,26 +321,56 @@ SILC_TASK_CALLBACK(silc_schedule_wakeup_cb)
 
   SILC_LOG_DEBUG(("Wokeup"));
 
-  read(internal->wakeup_pipe[0], &c, 1);
+  (void)read(internal->wakeup_pipe[0], &c, 1);
 }
 
+SILC_TASK_CALLBACK(silc_schedule_wakeup_init)
+{
+  SilcUnixScheduler internal = schedule->internal;
+
+  internal->wakeup_task =
+    silc_schedule_task_add(schedule, internal->wakeup_pipe[0],
+			   silc_schedule_wakeup_cb, internal,
+			   0, 0, SILC_TASK_FD);
+  if (!internal->wakeup_task) {
+    SILC_LOG_WARNING(("Could not add a wakeup task, threads won't work"));
+    close(internal->wakeup_pipe[0]);
+    return;
+  }
+  silc_schedule_internal_schedule_fd(schedule, internal,
+				     (SilcTaskFd)internal->wakeup_task,
+				     SILC_TASK_READ);
+}
 #endif /* SILC_THREADS */
 
 /* Initializes the platform specific scheduler.  This for example initializes
    the wakeup mechanism of the scheduler.  In multi-threaded environment
-   the scheduler needs to be wakenup when tasks are added or removed from
+   the scheduler needs to be woken up when tasks are added or removed from
   the task queues.  Returns context to the platform specific scheduler. */
 
 void *silc_schedule_internal_init(SilcSchedule schedule,
				   void *app_context)
 {
   SilcUnixScheduler internal;
+  int i;
 
   internal = silc_calloc(1, sizeof(*internal));
   if (!internal)
     return NULL;
 
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+  internal->epfd = epoll_create(4);
+  if (internal->epfd < 0) {
+    SILC_LOG_ERROR(("epoll_create() failed: %s", strerror(errno)));
+    return NULL;
+  }
+  internal->fds = silc_calloc(4, sizeof(*internal->fds));
+  if (!internal->fds) {
+    close(internal->epfd);
+    return NULL;
+  }
+  internal->fds_count = 4;
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
   getrlimit(RLIMIT_NOFILE, &internal->nofile);
 
   if (schedule->max_tasks > 0) {
@@ -246,21 +398,18 @@ void *silc_schedule_internal_init(SilcSchedule schedule,
     return NULL;
   }
 
-  internal->wakeup_task =
-    silc_schedule_task_add(schedule, internal->wakeup_pipe[0],
-			   silc_schedule_wakeup_cb, internal,
-			   0, 0, SILC_TASK_FD);
-  if (!internal->wakeup_task) {
-    SILC_LOG_ERROR(("Could not add a wakeup task, threads won't work"));
-    close(internal->wakeup_pipe[0]);
-    close(internal->wakeup_pipe[1]);
-    silc_free(internal);
-    return NULL;
-  }
-#endif
+  silc_schedule_task_add_timeout(schedule, silc_schedule_wakeup_init,
				  internal, 0, 0);
+#endif /* SILC_THREADS */
 
   internal->app_context = app_context;
 
+  for (i = 0; i < SIGNAL_COUNT; i++) {
+    signal_call[i].sig = 0;
+    signal_call[i].call = FALSE;
+    signal_call[i].schedule = schedule;
+  }
+
   return (void *)internal;
 }
 
@@ -283,7 +432,10 @@ void silc_schedule_internal_uninit(SilcSchedule schedule, void *context)
     close(internal->wakeup_pipe[1]);
 #endif
 
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+  close(internal->epfd);
+  silc_free(internal->fds);
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
   silc_free(internal->fds);
 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
 
@@ -297,50 +449,39 @@ void silc_schedule_internal_wakeup(SilcSchedule schedule, void *context)
 #ifdef SILC_THREADS
   SilcUnixScheduler internal = (SilcUnixScheduler)context;
 
-  if (!internal)
+  if (!internal || !internal->wakeup_task)
     return;
 
   SILC_LOG_DEBUG(("Wakeup"));
 
-  write(internal->wakeup_pipe[1], "!", 1);
+  (void)write(internal->wakeup_pipe[1], "!", 1);
 #endif
 }
 
-void silc_schedule_internal_signal_register(SilcSchedule schedule,
-					    void *context,
-					    SilcUInt32 signal,
-					    SilcTaskCallback callback,
-					    void *callback_context)
+/* Signal handler */
+
+static void silc_schedule_internal_sighandler(int signal)
 {
-  SilcUnixScheduler internal = (SilcUnixScheduler)context;
   int i;
 
-  if (!internal)
-    return;
-
-  SILC_LOG_DEBUG(("Registering signal %d", signal));
-
-  silc_schedule_internal_signals_block(schedule, context);
+  SILC_LOG_DEBUG(("Start"));
 
   for (i = 0; i < SIGNAL_COUNT; i++) {
-    if (!internal->signal_call[i].signal) {
-      internal->signal_call[i].signal = signal;
-      internal->signal_call[i].callback = callback;
-      internal->signal_call[i].context = callback_context;
-      internal->signal_call[i].call = FALSE;
+    if (signal_call[i].sig == signal) {
+      signal_call[i].call = TRUE;
+      signal_call[i].schedule->signal_tasks = TRUE;
+      SILC_LOG_DEBUG(("Scheduling signal %d to be called",
		      signal_call[i].sig));
       break;
     }
   }
-
-  silc_schedule_internal_signals_unblock(schedule, context);
-  sigaddset(&internal->signals, signal);
 }
 
-void silc_schedule_internal_signal_unregister(SilcSchedule schedule,
-					      void *context,
-					      SilcUInt32 signal,
-					      SilcTaskCallback callback,
-					      void *callback_context)
+void silc_schedule_internal_signal_register(SilcSchedule schedule,
+					    void *context,
+					    SilcUInt32 sig,
+					    SilcTaskCallback callback,
+					    void *callback_context)
 {
   SilcUnixScheduler internal = (SilcUnixScheduler)context;
   int i;
@@ -348,29 +489,29 @@ void silc_schedule_internal_signal_unregister(SilcSchedule schedule,
   if (!internal)
     return;
 
-  SILC_LOG_DEBUG(("Unregistering signal %d", signal));
+  SILC_LOG_DEBUG(("Registering signal %d", sig));
 
   silc_schedule_internal_signals_block(schedule, context);
 
   for (i = 0; i < SIGNAL_COUNT; i++) {
-    if (internal->signal_call[i].signal == signal &&
-	internal->signal_call[i].callback == callback &&
-	internal->signal_call[i].context == callback_context) {
-      internal->signal_call[i].signal = 0;
-      internal->signal_call[i].callback = NULL;
-      internal->signal_call[i].context = NULL;
-      internal->signal_call[i].call = FALSE;
+    if (!signal_call[i].sig) {
+      signal_call[i].sig = sig;
+      signal_call[i].callback = callback;
+      signal_call[i].context = callback_context;
+      signal_call[i].schedule = schedule;
+      signal_call[i].call = FALSE;
+      signal(sig, silc_schedule_internal_sighandler);
+      break;
     }
   }
 
   silc_schedule_internal_signals_unblock(schedule, context);
-  sigdelset(&internal->signals, signal);
+  sigaddset(&internal->signals, sig);
 }
 
-/* Mark signal to be called later. */
-
-void silc_schedule_internal_signal_call(SilcSchedule schedule,
-					void *context, SilcUInt32 signal)
+void silc_schedule_internal_signal_unregister(SilcSchedule schedule,
+					      void *context,
+					      SilcUInt32 sig)
 {
   SilcUnixScheduler internal = (SilcUnixScheduler)context;
   int i;
@@ -378,17 +519,23 @@ void silc_schedule_internal_signal_call(SilcSchedule schedule,
   if (!internal)
     return;
 
+  SILC_LOG_DEBUG(("Unregistering signal %d", sig));
+
   silc_schedule_internal_signals_block(schedule, context);
 
   for (i = 0; i < SIGNAL_COUNT; i++) {
-    if (internal->signal_call[i].signal == signal) {
-      internal->signal_call[i].call = TRUE;
-      SILC_LOG_DEBUG(("Scheduling signal %d to be called",
-		      internal->signal_call[i].signal));
+    if (signal_call[i].sig == sig) {
+      signal_call[i].sig = 0;
+      signal_call[i].callback = NULL;
+      signal_call[i].context = NULL;
+      signal_call[i].schedule = NULL;
+      signal_call[i].call = FALSE;
+      signal(sig, SIG_DFL);
     }
   }
 
   silc_schedule_internal_signals_unblock(schedule, context);
+  sigdelset(&internal->signals, sig);
 }
 
 /* Call all signals */
@@ -406,15 +553,17 @@ void silc_schedule_internal_signals_call(SilcSchedule schedule, void *context)
 
   silc_schedule_internal_signals_block(schedule, context);
 
   for (i = 0; i < SIGNAL_COUNT; i++) {
-    if (internal->signal_call[i].call &&
-	internal->signal_call[i].callback) {
+    if (signal_call[i].call &&
+	signal_call[i].callback) {
       SILC_LOG_DEBUG(("Calling signal %d callback",
-		      internal->signal_call[i].signal));
-      internal->signal_call[i].callback(schedule, internal->app_context,
-					SILC_TASK_INTERRUPT,
-					internal->signal_call[i].signal,
-					internal->signal_call[i].context);
-      internal->signal_call[i].call = FALSE;
+		      signal_call[i].sig));
+      silc_schedule_internal_signals_unblock(schedule, context);
+      signal_call[i].callback(schedule, internal->app_context,
			      SILC_TASK_INTERRUPT,
			      signal_call[i].sig,
			      signal_call[i].context);
+      signal_call[i].call = FALSE;
+      silc_schedule_internal_signals_block(schedule, context);
     }
   }
 
@@ -450,15 +599,17 @@ const SilcScheduleOps schedule_ops =
 {
   silc_schedule_internal_init,
   silc_schedule_internal_uninit,
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+  silc_epoll,
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
   silc_poll,
 #else
   silc_select,
 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
+  silc_schedule_internal_schedule_fd,
   silc_schedule_internal_wakeup,
   silc_schedule_internal_signal_register,
   silc_schedule_internal_signal_unregister,
-  silc_schedule_internal_signal_call,
   silc_schedule_internal_signals_call,
   silc_schedule_internal_signals_block,
   silc_schedule_internal_signals_unblock,
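
The heart of this patch is the new level-triggered epoll backend: silc_schedule_internal_schedule_fd() maps a task's READ/WRITE mask onto EPOLL_CTL_ADD/MOD/DEL (a zero mask unschedules), the task pointer rides along in epoll_event.data.ptr so no hash lookup is needed on wakeup, and silc_epoll() converts the scheduler timeout to milliseconds before blocking in epoll_wait(). The standalone sketch below shows the same pattern outside SILC; struct task, the TASK_* masks and schedule_fd() are illustrative stand-ins, not SILC APIs, and only the standard Linux epoll calls are assumed.

/* Minimal sketch of the level-triggered epoll pattern used by the patch. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>

struct task {            /* stand-in for SilcTaskFd */
  int fd;
  unsigned events;       /* requested event mask */
  unsigned revents;      /* ready events, filled by the dispatcher */
  int scheduled;         /* mirrors task->scheduled in the patch */
};

#define TASK_READ  0x1
#define TASK_WRITE 0x2

/* Add, modify or remove `t' in the epoll set, like
   silc_schedule_internal_schedule_fd() does. */
static int schedule_fd(int epfd, struct task *t, unsigned mask)
{
  struct epoll_event ev;

  memset(&ev, 0, sizeof(ev));
  if (mask & TASK_READ)
    ev.events |= EPOLLIN | EPOLLPRI;
  if (mask & TASK_WRITE)
    ev.events |= EPOLLOUT;

  if (!ev.events)        /* zero mask unschedules */
    return epoll_ctl(epfd, EPOLL_CTL_DEL, t->fd, &ev);

  ev.data.ptr = t;       /* recover the task directly on wakeup */
  if (!t->scheduled) {
    if (epoll_ctl(epfd, EPOLL_CTL_ADD, t->fd, &ev))
      return -1;
    t->scheduled = 1;
    return 0;
  }
  return epoll_ctl(epfd, EPOLL_CTL_MOD, t->fd, &ev);
}

int main(void)
{
  struct epoll_event out[8];
  struct task t = { 0 /* stdin */, TASK_READ, 0, 0 };
  int epfd = epoll_create(4), i, n;

  if (epfd < 0 || schedule_fd(epfd, &t, t.events)) {
    perror("epoll");
    return 1;
  }

  /* 2.5 second timeout, converted to milliseconds as in silc_epoll() */
  n = epoll_wait(epfd, out, 8, (2 * 1000) + (500000 / 1000));
  for (i = 0; i < n; i++) {
    struct task *rt = out[i].data.ptr;
    rt->revents = 0;
    if (out[i].events & (EPOLLIN | EPOLLPRI | EPOLLHUP | EPOLLERR))
      rt->revents |= TASK_READ;
    if (out[i].events & EPOLLOUT)
      rt->revents |= TASK_WRITE;
    printf("fd %d ready, revents %x\n", rt->fd, rt->revents);
  }

  close(epfd);
  return 0;
}

Because the set membership lives in the kernel, only changed masks cost a system call, whereas the old poll()/select() paths rebuild the whole fd table on every scheduler round; that is presumably why the patch also tracks task->scheduled to choose between ADD and MOD.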