+#include "silc.h"
+
+#if defined(HAVE_EPOLL_WAIT)
+#include <sys/epoll.h>
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#include <poll.h>
+#endif
+
/* Platform scheduler operations table; this is a tentative definition,
   presumably initialized near the end of this file — confirm there. */
const SilcScheduleOps schedule_ops;
+
+/* Internal context. */
/* Internal context.  Platform-private state attached to a SilcSchedule;
   the fd-table members depend on which polling mechanism was detected. */
typedef struct {
#if defined(HAVE_EPOLL_WAIT)
  struct epoll_event *fds;	/* Event array handed to epoll_wait() */
  SilcUInt32 fds_count;		/* Capacity of `fds' in entries */
  int epfd;			/* epoll instance descriptor */
#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
  struct rlimit nofile;		/* Cached RLIMIT_NOFILE, raised as fds grows */
  struct pollfd *fds;		/* pollfd array handed to poll() */
  SilcUInt32 fds_count;		/* Capacity of `fds' in entries */
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
  void *app_context;		/* Application context from scheduler init */
  int wakeup_pipe[2];		/* Pipe used to wake a blocked wait call */
  SilcTask wakeup_task;		/* Task reading the wakeup pipe */
  sigset_t signals;		/* Signals registered with the scheduler */
  sigset_t signals_blocked;	/* Saved mask while signals are blocked */
} *SilcUnixScheduler;
+
/* Registered signal handler record.  `call' is set from the async signal
   handler and the callback is delivered later from the scheduler loop. */
typedef struct {
  SilcUInt32 sig;		/* Signal number */
  SilcTaskCallback callback;	/* Callback to deliver */
  void *context;		/* Callback context */
  SilcBool call;		/* TRUE when the signal is pending delivery */
  SilcSchedule schedule;	/* Owning scheduler */
} SilcUnixSignal;

/* Fixed table of registrable signals (classic UNIX signals 1..31). */
#define SIGNAL_COUNT 32
SilcUnixSignal signal_call[SIGNAL_COUNT];
+
+#if defined(HAVE_EPOLL_WAIT)
+
+/* Linux's fast epoll system (level triggered) */
+
+int silc_epoll(SilcSchedule schedule, void *context)
+{
+ SilcUnixScheduler internal = context;
+ SilcTaskFd task;
+ struct epoll_event *fds = internal->fds;
+ SilcUInt32 fds_count = internal->fds_count;
+ int ret, i, timeout = -1;
+
+ /* Allocate larger fd table if needed */
+ i = silc_hash_table_count(schedule->fd_queue);
+ if (i > fds_count) {
+ fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
+ (fds_count + (i / 2)));
+ if (silc_likely(fds)) {
+ internal->fds = fds;
+ internal->fds_count = fds_count = fds_count + (i / 2);
+ }
+ }
+
+ if (schedule->has_timeout)
+ timeout = ((schedule->timeout.tv_sec * 1000) +
+ (schedule->timeout.tv_usec / 1000));
+
+ SILC_SCHEDULE_UNLOCK(schedule);
+ ret = epoll_wait(internal->epfd, fds, fds_count, timeout);
+ SILC_SCHEDULE_LOCK(schedule);
+ if (ret <= 0)
+ return ret;
+
+ silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
+
+ for (i = 0; i < ret; i++) {
+ task = fds[i].data.ptr;
+ task->revents = 0;
+ if (!task->header.valid || !task->events) {
+ epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &fds[i]);
+ continue;
+ }
+ if (fds[i].events & (EPOLLIN | EPOLLPRI | EPOLLHUP | EPOLLERR))
+ task->revents |= SILC_TASK_READ;
+ if (fds[i].events & EPOLLOUT)
+ task->revents |= SILC_TASK_WRITE;
+ silc_list_add(schedule->fd_dispatch, task);
+ }
+
+ return ret;
+}
+
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+
+/* Calls normal poll() system call. */
+
+int silc_poll(SilcSchedule schedule, void *context)
+{
+ SilcUnixScheduler internal = context;
+ SilcHashTableList htl;
+ SilcTaskFd task;
+ struct pollfd *fds = internal->fds;
+ SilcUInt32 fds_count = internal->fds_count;
+ int fd, ret, i = 0, timeout = -1;
+ void *fdp;
+
+ silc_hash_table_list(schedule->fd_queue, &htl);
+ while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
+ if (!task->events)
+ continue;
+ fd = SILC_PTR_TO_32(fdp);
+
+ /* Allocate larger fd table if needed */
+ if (i >= fds_count) {
+ struct rlimit nofile;
+
+ fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
+ (fds_count + (fds_count / 2)));
+ if (silc_unlikely(!fds))
+ break;
+ internal->fds = fds;
+ internal->fds_count = fds_count = fds_count + (fds_count / 2);
+ internal->nofile.rlim_cur = fds_count;
+ if (fds_count > internal->nofile.rlim_max)
+ internal->nofile.rlim_max = fds_count;
+ if (setrlimit(RLIMIT_NOFILE, &nofile) < 0)
+ break;
+ }
+
+ fds[i].fd = fd;
+ fds[i].events = 0;
+ task->revents = fds[i].revents = 0;
+
+ if (task->events & SILC_TASK_READ)
+ fds[i].events |= (POLLIN | POLLPRI);
+ if (task->events & SILC_TASK_WRITE)
+ fds[i].events |= POLLOUT;
+ i++;
+ }
+ silc_hash_table_list_reset(&htl);
+ silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
+
+ if (schedule->has_timeout)
+ timeout = ((schedule->timeout.tv_sec * 1000) +
+ (schedule->timeout.tv_usec / 1000));
+
+ fds_count = i;
+ SILC_SCHEDULE_UNLOCK(schedule);
+ ret = poll(fds, fds_count, timeout);
+ SILC_SCHEDULE_LOCK(schedule);
+ if (ret <= 0)
+ return ret;
+
+ for (i = 0; i < fds_count; i++) {
+ if (!fds[i].revents)
+ continue;
+ if (!silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fds[i].fd),
+ NULL, (void *)&task))
+ continue;
+ if (!task->header.valid || !task->events)
+ continue;
+
+ fd = fds[i].revents;
+ if (fd & (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLNVAL))
+ task->revents |= SILC_TASK_READ;
+ if (fd & POLLOUT)
+ task->revents |= SILC_TASK_WRITE;
+ silc_list_add(schedule->fd_dispatch, task);
+ }
+
+ return ret;
+}
+
+#else