* SYNOPSIS
*
* typedef SilcBool (*SilcLogCb)(SilcLogType type, char *message,
- * void *context);
+ *                               void *context);
*
* DESCRIPTION
*
* SYNOPSIS
*
* typedef SilcBool (*SilcLogDebugCb)(char *file, char *function, int line,
- * char *message, void *context);
+ *                                    char *message, void *context);
*
* DESCRIPTION
*
*
***/
typedef SilcBool (*SilcLogDebugCb)(char *file, char *function, int line,
- char *message, void *context);
+                                    char *message, void *context);
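
For illustration, a minimal debug callback matching this typedef might look as follows. This is a sketch only: the DESCRIPTION above is elided from this excerpt, so the return-value contract is assumed (FALSE taken to suppress the library's default output), `my_debug_cb' is a hypothetical name, and <stdio.h> (or silc.h) is assumed to be included.

static SilcBool my_debug_cb(char *file, char *function, int line,
                            char *message, void *context)
{
  /* `context' is whatever was supplied at registration; we assume a
     FILE pointer here, falling back to stderr. */
  FILE *out = context ? (FILE *)context : stderr;
  fprintf(out, "%s:%d %s: %s\n", file, line, function, message);
  return FALSE;  /* assumed: FALSE suppresses the default output */
}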
/****f* silcutil/SilcLogAPI/SilcLogHexdumpCb
*
* SYNOPSIS
*
- * typedef SilcBool (*SilcDebugHexdumpCb)(char *file, char *function, int line,
- * unsigned char *data,
- * SilcUInt32 data_len,
- * char *message, void *context;
+ * typedef SilcBool
+ * (*SilcLogHexdumpCb)(char *file, char *function, int line,
+ *                     unsigned char *data,
+ *                     SilcUInt32 data_len,
+ *                     char *message, void *context);
*
* DESCRIPTION
*
*
***/
typedef SilcBool (*SilcLogHexdumpCb)(char *file, char *function, int line,
- unsigned char *data, SilcUInt32 data_len,
- char *message, void *context);
+                                      unsigned char *data, SilcUInt32 data_len,
+                                      char *message, void *context);
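
A matching sketch for the hexdump callback, under the same assumptions; `my_hexdump_cb' is hypothetical and the 16-bytes-per-row output format is arbitrary.

static SilcBool my_hexdump_cb(char *file, char *function, int line,
                              unsigned char *data, SilcUInt32 data_len,
                              char *message, void *context)
{
  SilcUInt32 i;
  fprintf(stderr, "%s:%d %s: %s\n", file, line, function, message);
  for (i = 0; i < data_len; i++)
    fprintf(stderr, "%02x%c", data[i], (i + 1) % 16 ? ' ' : '\n');
  if (data_len % 16)
    fputc('\n', stderr);
  return FALSE;
}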
/* Macros */
* DESCRIPTION
*
* Assert macro that prints error message to stderr and calls abort()
- * if the `expression' is is false (ie. compares equal to zero). If
- * SILC_DEBUG is not defined this macro as no effect.
+ * if the `expression' is false (ie. compares equal to zero). If
+ * SILC_DEBUG is not defined this macro has no effect.
*
* SOURCE
*/
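The SOURCE body is elided from this excerpt. A minimal sketch consistent with the description above, assuming the macro is named SILC_ASSERT (the name is not visible here):

#if defined(SILC_DEBUG)
#define SILC_ASSERT(expression)                                 \
do {                                                            \
  if (!(expression)) {                                          \
    fprintf(stderr, "%s:%d: Assertion `%s' failed\n",           \
            __FILE__, __LINE__, #expression);                   \
    abort();                                                    \
  }                                                             \
} while(0)
#else
#define SILC_ASSERT(expression) do { } while(0)
#endif /* SILC_DEBUG */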
static void silc_schedule_dispatch_fd(SilcSchedule schedule)
{
- SilcHashTableList htl;
- SilcTask t;
SilcTaskFd task;
- SilcUInt32 fd;
+ SilcTask t;
- silc_hash_table_list(schedule->fd_queue, &htl);
- while (silc_likely(silc_hash_table_get(&htl, (void **)&fd,
- (void **)&task))) {
+  /* The dispatch list includes only valid tasks that have something
+     to dispatch.  Dispatching is atomic: even if another thread
+     invalidates a task while we are unlocked, we dispatch it to
+     completion. */
+ SILC_SCHEDULE_UNLOCK(schedule);
+ silc_list_start(schedule->fd_dispatch);
+ while ((task = silc_list_get(schedule->fd_dispatch))) {
t = (SilcTask)task;
- if (silc_unlikely(!t->valid)) {
- silc_schedule_task_remove(schedule, t);
- continue;
- }
- if (!task->revents || !task->events)
- continue;
-
/* Is the task ready for reading */
- if (task->revents & SILC_TASK_READ) {
- SILC_SCHEDULE_UNLOCK(schedule);
+ if (task->revents & SILC_TASK_READ)
t->callback(schedule, schedule->app_context, SILC_TASK_READ,
task->fd, t->context);
- SILC_SCHEDULE_LOCK(schedule);
- }
/* Is the task ready for writing */
- if (t->valid && task->revents & SILC_TASK_WRITE) {
- SILC_SCHEDULE_UNLOCK(schedule);
+ if (t->valid && task->revents & SILC_TASK_WRITE)
t->callback(schedule, schedule->app_context, SILC_TASK_WRITE,
task->fd, t->context);
- SILC_SCHEDULE_LOCK(schedule);
- }
-
- /* Remove if task was invalidated in the task callback */
- if (silc_unlikely(!t->valid))
- silc_schedule_task_remove(schedule, t);
}
- silc_hash_table_list_reset(&htl);
+ SILC_SCHEDULE_LOCK(schedule);
+
+ /* Remove invalidated tasks */
+ silc_list_start(schedule->fd_dispatch);
+ while ((task = silc_list_get(schedule->fd_dispatch)))
+ if (silc_unlikely(!task->header.valid))
+ silc_schedule_task_remove(schedule, (SilcTask)task);
}
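
Because the dispatcher runs callbacks unlocked and defers removal to the second pass above, a callback may safely invalidate its own task mid-dispatch. A sketch (the callback name and body are hypothetical; the signature follows the call sites above):

static void my_fd_callback(SilcSchedule schedule, void *app_context,
                           SilcTaskEvent type, SilcUInt32 fd, void *context)
{
  if (type == SILC_TASK_READ) {
    /* ... read; suppose the peer closed the connection ... */
    silc_schedule_task_del_by_fd(schedule, fd);
    /* The task is only marked invalid here; the dispatcher unlinks it
       in the removal pass after re-taking the scheduler lock. */
  }
}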
/* Executes all tasks whose timeout has expired. The task is removed from
if (!schedule->fd_queue)
return NULL;
- silc_list_init(schedule->timeout_queue, struct SilcTaskTimeoutStruct, next);
- silc_list_init(schedule->free_tasks, struct SilcTaskTimeoutStruct, next);
+ silc_list_init(schedule->timeout_queue, struct SilcTaskStruct, next);
+ silc_list_init(schedule->free_tasks, struct SilcTaskStruct, next);
schedule->app_context = app_context;
schedule->valid = TRUE;
of the selected file descriptors change status or the selected
timeout expires. */
SILC_LOG_DEBUG(("Select"));
- ret = schedule_ops.select(schedule, schedule->internal);
+ ret = schedule_ops.schedule(schedule, schedule->internal);
if (silc_likely(ret == 0)) {
/* Timeout */
if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
NULL, (void **)&task)) {
task->events = mask;
- if (silc_unlikely(send_events)) {
+ schedule_ops.schedule_fd(schedule, schedule->internal, task, mask);
+ if (silc_unlikely(send_events) && mask) {
task->revents = mask;
silc_schedule_dispatch_fd(schedule);
}
SILC_SCHEDULE_UNLOCK(schedule);
}
+/* Returns the file descriptor's currently requested event mask. */
+
+SilcTaskEvent silc_schedule_get_fd_events(SilcSchedule schedule,
+ SilcUInt32 fd)
+{
+ SilcTaskFd task;
+ SilcTaskEvent event = 0;
+
+ if (silc_unlikely(!schedule->valid))
+ return 0;
+
+ SILC_SCHEDULE_LOCK(schedule);
+ if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
+ NULL, (void **)&task))
+ event = task->events;
+ SILC_SCHEDULE_UNLOCK(schedule);
+
+ return event;
+}
+
/* Removes a file descriptor from listen list. */
void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
void silc_schedule_set_listen_fd(SilcSchedule schedule, SilcUInt32 fd,
SilcTaskEvent mask, SilcBool send_events);
+/****f* silcutil/SilcScheduleAPI/silc_schedule_get_fd_events
+ *
+ * SYNOPSIS
+ *
+ * SilcTaskEvent silc_schedule_get_fd_events(SilcSchedule schedule,
+ * SilcUInt32 fd);
+ *
+ * DESCRIPTION
+ *
+ * Returns the currently requested event mask of the file descriptor
+ * `fd', or 0 on error.
+ *
+ ***/
+SilcTaskEvent silc_schedule_get_fd_events(SilcSchedule schedule,
+ SilcUInt32 fd);
+
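Typical use, mirroring the socket-stream hunks elsewhere in this patch (`schedule' and `fd' stand in for the caller's scheduler and descriptor): drop WRITE notification only when it is actually requested.

if (silc_schedule_get_fd_events(schedule, fd) & SILC_TASK_WRITE)
  silc_schedule_set_listen_fd(schedule, fd, SILC_TASK_READ, FALSE);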
/****f* silcutil/SilcScheduleAPI/silc_schedule_unset_listen_fd
*
* SYNOPSIS
/* Task header */
struct SilcTaskStruct {
+ struct SilcTaskStruct *next;
SilcTaskCallback callback;
void *context;
unsigned int type : 1; /* 0 = fd, 1 = timeout */
/* Timeout task */
typedef struct SilcTaskTimeoutStruct {
struct SilcTaskStruct header;
- struct SilcTaskTimeoutStruct *next;
struct timeval timeout;
} *SilcTaskTimeout;
/* Fd task */
-typedef struct {
+typedef struct SilcTaskFdStruct {
struct SilcTaskStruct header;
- unsigned int events : 15;
- unsigned int revents : 15;
+ unsigned int scheduled : 1;
+ unsigned int events : 14;
+ unsigned int revents : 15;
SilcUInt32 fd;
} *SilcTaskFd;
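Note the bit budget: the new `scheduled' flag is paid for by narrowing `events' from 15 to 14 bits, so the bitfields still total 30 bits (1 + 14 + 15) and the structure layout is unchanged; 14 bits remain ample for the SilcTaskEvent mask.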
void *internal;
void *app_context; /* Application specific context */
SilcHashTable fd_queue; /* FD task queue */
+ SilcList fd_dispatch; /* Dispatched FDs */
SilcList timeout_queue; /* Timeout queue */
SilcList free_tasks; /* Timeout task freelist */
SilcMutex lock; /* Scheduler lock */
struct timeval timeout; /* Current timeout */
- unsigned int max_tasks : 28; /* Max FD tasks */
+ unsigned int max_tasks : 29; /* Max FD tasks */
unsigned int has_timeout : 1; /* Set if timeout is set */
unsigned int valid : 1; /* Set if scheduler is valid */
unsigned int signal_tasks : 1; /* Set if to dispatch signals */
synchronise signals with SILC Scheduler. */
#define SILC_SCHEDULE_LOCK(schedule) \
do { \
- schedule_ops.signals_block(schedule, schedule->internal); \
silc_mutex_lock(schedule->lock); \
+ schedule_ops.signals_block(schedule, schedule->internal); \
} while (0)
#define SILC_SCHEDULE_UNLOCK(schedule) \
do { \
- silc_mutex_unlock(schedule->lock); \
schedule_ops.signals_unblock(schedule, schedule->internal); \
+ silc_mutex_unlock(schedule->lock); \
} while (0)
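With this ordering, signals are blocked only after the mutex is held and unblocked before it is released, so signals are never left masked while a thread waits on a contended scheduler lock.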
/* Platform specific scheduler operations */
/* Uninitializes the platform specific scheduler context. */
void (*uninit)(SilcSchedule schedule, void *context);
- /* System specific select(). Returns same values as normal select(). */
- int (*select)(SilcSchedule schedule, void *context);
+  /* System specific waiter.  This must fill the schedule->fd_dispatch
+     queue with valid tasks that have something to dispatch when it
+     returns. */
+ int (*schedule)(SilcSchedule schedule, void *context);
+
+ /* Schedule `task' with events `event_mask'. Zero `event_mask'
+ unschedules the task. */
+ SilcBool (*schedule_fd)(SilcSchedule schedule, void *context,
+ SilcTaskFd task, SilcTaskEvent event_mask);
/* Wakes up the scheduler. This is platform specific routine */
void (*wakeup)(SilcSchedule schedule, void *context);
}
SILC_LOG_DEBUG(("Sent data %d bytes", ret));
- silc_schedule_set_listen_fd(sock->schedule, sock->sock,
- SILC_TASK_READ, FALSE);
+ if (silc_schedule_get_fd_events(sock->schedule, sock->sock) &
+ SILC_TASK_WRITE)
+ silc_schedule_set_listen_fd(sock->schedule, sock->sock,
+ SILC_TASK_READ, FALSE);
return ret;
}
SilcSocketStreamStatus stream_status;
SilcStream stream;
SilcFSMStruct fsm;
- SilcFSMSemaStruct sema;
+ SilcFSMEventStruct sema;
SilcAsyncOperation op;
SilcAsyncOperation sop;
char *local_ip;
SILC_TASK_CALLBACK(silc_net_connect_wait)
{
SilcNetConnect conn = context;
- SILC_FSM_SEMA_POST(&conn->sema);
+ SILC_FSM_EVENT_SIGNAL(&conn->sema);
silc_schedule_task_del_by_fd(schedule, conn->sock);
}
if (conn->aborted) {
/** Aborted */
silc_fsm_next(fsm, silc_net_connect_st_finish);
- return SILC_FSM_CONTINUE;
+ SILC_FSM_CONTINUE;
}
/* Do host lookup */
/** Network unreachable */
conn->status = SILC_NET_HOST_UNREACHABLE;
silc_fsm_next(fsm, silc_net_connect_st_finish);
- return SILC_FSM_CONTINUE;
+ SILC_FSM_CONTINUE;
}
/* Set sockaddr for this connection */
if (!silc_net_set_sockaddr(&desthost, conn->ip_addr, conn->port)) {
/** Sockaddr failed */
silc_fsm_next(fsm, silc_net_connect_st_finish);
- return SILC_FSM_CONTINUE;
+ SILC_FSM_CONTINUE;
}
/* Create the connection socket */
/** Cannot create socket */
SILC_LOG_ERROR(("Cannot create socket: %s", strerror(errno)));
silc_fsm_next(fsm, silc_net_connect_st_finish);
- return SILC_FSM_CONTINUE;
+ SILC_FSM_CONTINUE;
}
/* Bind to the local address if provided */
/** Cannot connect to remote host */
SILC_LOG_ERROR(("Cannot connect to remote host: %s", strerror(errno)));
silc_fsm_next(fsm, silc_net_connect_st_finish);
- return SILC_FSM_CONTINUE;
+ SILC_FSM_CONTINUE;
}
}
/** Wait for connection */
silc_fsm_next(fsm, silc_net_connect_st_connected);
- silc_fsm_sema_init(&conn->sema, fsm, 0);
+ silc_fsm_event_init(&conn->sema, fsm);
silc_schedule_task_add_fd(silc_fsm_get_schedule(fsm), sock,
silc_net_connect_wait, conn);
silc_schedule_set_listen_fd(silc_fsm_get_schedule(fsm), sock,
SILC_TASK_WRITE, FALSE);
- SILC_FSM_SEMA_WAIT(&conn->sema);
- return SILC_FSM_CONTINUE;
+ SILC_FSM_EVENT_WAIT(&conn->sema);
+ SILC_FSM_CONTINUE;
}
static void silc_net_connect_wait_stream(SilcSocketStreamStatus status,
if (conn->aborted) {
/** Aborted */
silc_fsm_next(fsm, silc_net_connect_st_finish);
- return SILC_FSM_CONTINUE;
+ SILC_FSM_CONTINUE;
}
ret = silc_net_get_socket_opt(conn->sock, SOL_SOCKET, SO_ERROR,
conn->retry--;
silc_net_close_connection(conn->sock);
silc_fsm_next(fsm, silc_net_connect_st_start);
- return SILC_FSM_CONTINUE;
+ SILC_FSM_CONTINUE;
}
#if defined(ECONNREFUSED)
/** Connecting failed */
SILC_LOG_DEBUG(("Connecting failed"));
silc_fsm_next(fsm, silc_net_connect_st_finish);
- return SILC_FSM_CONTINUE;
+ SILC_FSM_CONTINUE;
}
/** Connection created */
if (conn->aborted) {
/** Aborted */
silc_fsm_next(fsm, silc_net_connect_st_finish);
- return SILC_FSM_CONTINUE;
+ SILC_FSM_CONTINUE;
}
if (conn->stream_status != SILC_SOCKET_OK) {
else
conn->status = SILC_NET_ERROR;
silc_fsm_next(fsm, silc_net_connect_st_finish);
- return SILC_FSM_CONTINUE;
+ SILC_FSM_CONTINUE;
}
/* Set stream information */
SILC_LOG_DEBUG(("Connected successfully"));
conn->status = SILC_NET_OK;
silc_fsm_next(fsm, silc_net_connect_st_finish);
- return SILC_FSM_CONTINUE;
+ SILC_FSM_CONTINUE;
}
SILC_FSM_STATE(silc_net_connect_st_finish)
silc_async_free(conn->sop);
}
- return SILC_FSM_FINISH;
+ SILC_FSM_FINISH;
}
static void silc_net_connect_abort(SilcAsyncOperation op, void *context)
#include "silc.h"
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+#include <sys/epoll.h>
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
#include <poll.h>
#endif
/* Internal context. */
typedef struct {
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+ struct epoll_event *fds;
+ SilcUInt32 fds_count;
+ int epfd;
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
struct rlimit nofile;
struct pollfd *fds;
SilcUInt32 fds_count;
#define SIGNAL_COUNT 32
SilcUnixSignal signal_call[SIGNAL_COUNT];
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+
+/* Linux's fast epoll system (level triggered) */
+
+int silc_epoll(SilcSchedule schedule, void *context)
+{
+ SilcUnixScheduler internal = context;
+ SilcTaskFd task;
+ struct epoll_event *fds = internal->fds;
+ SilcUInt32 fds_count = internal->fds_count;
+ int ret, i, timeout = -1;
+
+ /* Allocate larger fd table if needed */
+ i = silc_hash_table_count(schedule->fd_queue);
+  if (i > fds_count) {
+    fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
+                       (fds_count + (i / 2)));
+    if (silc_likely(fds)) {
+      internal->fds = fds;
+      internal->fds_count = fds_count = fds_count + (i / 2);
+    } else {
+      /* Realloc failed; keep using the old, smaller table */
+      fds = internal->fds;
+    }
+  }
+
+ if (schedule->has_timeout)
+ timeout = ((schedule->timeout.tv_sec * 1000) +
+ (schedule->timeout.tv_usec / 1000));
+
+ SILC_SCHEDULE_UNLOCK(schedule);
+ ret = epoll_wait(internal->epfd, fds, fds_count, timeout);
+ SILC_SCHEDULE_LOCK(schedule);
+ if (ret <= 0)
+ return ret;
+
+ silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
+
+ for (i = 0; i < ret; i++) {
+ task = fds[i].data.ptr;
+ task->revents = 0;
+    if (!task->header.valid || !task->events) {
+      epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &fds[i]);
+      task->scheduled = FALSE;
+      continue;
+    }
+    if (fds[i].events & (EPOLLIN | EPOLLPRI))
+      task->revents |= SILC_TASK_READ;
+ if (fds[i].events & EPOLLOUT)
+ task->revents |= SILC_TASK_WRITE;
+ silc_list_add(schedule->fd_dispatch, task);
+ }
+
+ return ret;
+}
+
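Unlike the poll() and select() paths, which walk the whole fd_queue to discover ready descriptors, epoll_wait() returns only ready events with the task pointer carried in epoll_event.data.ptr, so the dispatch list is built directly from the result. Level-triggered mode (the default; EPOLLET is not set) means anything left unconsumed is simply reported again on the next wait.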
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
/* Calls normal poll() system call. */
i++;
}
silc_hash_table_list_reset(&htl);
+ silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
if (schedule->has_timeout)
timeout = ((schedule->timeout.tv_sec * 1000) +
if (!silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fds[i].fd),
NULL, (void **)&task))
continue;
+ if (!task->header.valid || !task->events)
+ continue;
fd = fds[i].revents;
if (fd & (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLNVAL))
task->revents |= SILC_TASK_READ;
if (fd & POLLOUT)
task->revents |= SILC_TASK_WRITE;
+ silc_list_add(schedule->fd_dispatch, task);
}
return ret;
task->revents = 0;
}
silc_hash_table_list_reset(&htl);
+ silc_list_init(schedule->fd_dispatch, struct SilcTaskStruct, next);
SILC_SCHEDULE_UNLOCK(schedule);
ret = select(max_fd + 1, &in, &out, NULL, (schedule->has_timeout ?
silc_hash_table_list(schedule->fd_queue, &htl);
while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
- if (!task->events)
+ if (!task->header.valid || !task->events)
continue;
#ifdef FD_SETSIZE
task->revents |= SILC_TASK_READ;
if (FD_ISSET(fd, &out))
task->revents |= SILC_TASK_WRITE;
+ silc_list_add(schedule->fd_dispatch, task);
}
silc_hash_table_list_reset(&htl);
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
+/* Schedule `task' with events `event_mask'. Zero `event_mask' unschedules. */
+
+SilcBool silc_schedule_internal_schedule_fd(SilcSchedule schedule,
+ void *context,
+ SilcTaskFd task,
+ SilcTaskEvent event_mask)
+{
+#if defined(HAVE_EPOLL_WAIT)
+ SilcUnixScheduler internal = (SilcUnixScheduler)context;
+ struct epoll_event event;
+
+  event.events = 0;
+  if (event_mask & SILC_TASK_READ)
+    event.events |= (EPOLLIN | EPOLLPRI);
+  if (event_mask & SILC_TASK_WRITE)
+    event.events |= EPOLLOUT;
+
+  /* Zero mask unschedules the task */
+  if (silc_unlikely(!event.events)) {
+    epoll_ctl(internal->epfd, EPOLL_CTL_DEL, task->fd, &event);
+    task->scheduled = FALSE;
+    return TRUE;
+  }
+
+ /* Schedule the task */
+ if (silc_unlikely(!task->scheduled)) {
+ event.data.ptr = task;
+ epoll_ctl(internal->epfd, EPOLL_CTL_ADD, task->fd, &event);
+ task->scheduled = TRUE;
+ return TRUE;
+ }
+
+ /* Schedule for specific mask */
+ event.data.ptr = task;
+ epoll_ctl(internal->epfd, EPOLL_CTL_MOD, task->fd, &event);
+#endif /* HAVE_EPOLL_WAIT */
+ return TRUE;
+}
+
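The `scheduled' flag records which epoll_ctl() operation applies: EPOLL_CTL_ADD fails with EEXIST for a descriptor already in the interest set, and EPOLL_CTL_MOD fails with ENOENT for one that is not.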
#ifdef SILC_THREADS
SILC_TASK_CALLBACK(silc_schedule_wakeup_cb)
if (!internal)
return NULL;
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
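+  /* Note: the size argument of epoll_create() is only a hint, ignored
+     on Linux 2.6.8 and later (it must merely be non-zero); 4 simply
+     mirrors the initial fds table size below. */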
+ internal->epfd = epoll_create(4);
+ if (internal->epfd < 0)
+ return NULL;
+ internal->fds = silc_calloc(4, sizeof(*internal->fds));
+ if (!internal->fds) {
+ close(internal->epfd);
+ return NULL;
+ }
+ internal->fds_count = 4;
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
getrlimit(RLIMIT_NOFILE, &internal->nofile);
if (schedule->max_tasks > 0) {
close(internal->wakeup_pipe[1]);
#endif
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+ close(internal->epfd);
+ silc_free(internal->fds);
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
silc_free(internal->fds);
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
{
silc_schedule_internal_init,
silc_schedule_internal_uninit,
-#if defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
+#if defined(HAVE_EPOLL_WAIT)
+ silc_epoll,
+#elif defined(HAVE_POLL) && defined(HAVE_SETRLIMIT) && defined(RLIMIT_NOFILE)
silc_poll,
#else
silc_select,
#endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
+ silc_schedule_internal_schedule_fd,
silc_schedule_internal_wakeup,
silc_schedule_internal_signal_register,
silc_schedule_internal_signal_unregister,
}
SILC_LOG_DEBUG(("Wrote data %d bytes", ret));
- silc_schedule_set_listen_fd(sock->schedule, sock->sock,
- SILC_TASK_READ, FALSE);
+ if (silc_schedule_get_fd_events(sock->schedule, sock->sock) &
+ SILC_TASK_WRITE)
+ silc_schedule_set_listen_fd(sock->schedule, sock->sock,
+ SILC_TASK_READ, FALSE);
return ret;
}