/* System specific routines. Implemented under unix/ and win32/. */
/* System specific select(). Returns same values as normal select(). */
-int silc_select(SilcScheduleFd fds, uint32 fds_count, struct timeval *timeout);
+int silc_select(SilcScheduleFd fds, SilcUInt32 fds_count, struct timeval *timeout);
/* Initializes the wakeup of the scheduler. In multi-threaded environment
the scheduler needs to be wakenup when tasks are added or removed from
static void silc_task_queue_alloc(SilcTaskQueue *queue);
static void silc_task_queue_free(SilcTaskQueue queue);
-static SilcTask silc_task_find(SilcTaskQueue queue, uint32 fd);
+static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd);
static SilcTask silc_task_add(SilcTaskQueue queue, SilcTask newtask,
SilcTaskPriority priority);
static SilcTask silc_task_get_first(SilcTaskQueue queue, SilcTask first);
static void silc_task_del_by_context(SilcTaskQueue queue, void *context);
static void silc_task_del_by_callback(SilcTaskQueue queue,
SilcTaskCallback callback);
-static void silc_task_del_by_fd(SilcTaskQueue queue, uint32 fd);
+static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd);
/* Returns the task queue by task type */
#define SILC_SCHEDULE_GET_QUEUE(type) \
/* SILC Task object. Represents one task in the scheduler. */
struct SilcTaskStruct {
- uint32 fd;
+ SilcUInt32 fd;
struct timeval timeout;
SilcTaskCallback callback;
void *context;
List of file descriptors the scheduler is supposed to be listenning.
This is updated internally.
- uint32 max_fd
- uint32 last_fd
+ SilcUInt32 max_fd
+ SilcUInt32 last_fd
Size of the fd_list list. There can be `max_fd' many tasks in
the scheduler at once. The `last_fd' is the last valid entry
SilcTaskQueue timeout_queue;
SilcTaskQueue generic_queue;
SilcScheduleFd fd_list;
- uint32 max_fd;
- uint32 last_fd;
+ SilcUInt32 max_fd;
+ SilcUInt32 last_fd;
struct timeval *timeout;
bool valid;
void *wakeup;
silc_task_queue_alloc(&schedule->timeout_queue);
silc_task_queue_alloc(&schedule->generic_queue);
+ if (!max_tasks)
+ max_tasks = 200;
+
/* Initialize the scheduler */
schedule->fd_list = silc_calloc(max_tasks, sizeof(*schedule->fd_list));
schedule->max_fd = max_tasks;
return TRUE;
}
+/* Enlarge the capabilities of the scheduler to handle tasks to `max_tasks'.
+   Returns FALSE if the scheduler already has room for `max_tasks' fds. */
+
+bool silc_schedule_reinit(SilcSchedule schedule, int max_tasks)
+{
+  silc_mutex_lock(schedule->lock);
+
+  /* Nothing to do if we already have the capacity.  Note: the comparison
+     must be `>=' (the inverted `<=' rejected every enlargement request),
+     and the lock must be released on this path too, or the next lock
+     attempt on `schedule->lock' deadlocks. */
+  if (schedule->max_fd >= max_tasks) {
+    silc_mutex_unlock(schedule->lock);
+    return FALSE;
+  }
+
+  schedule->fd_list = silc_realloc(schedule->fd_list,
+				   (sizeof(*schedule->fd_list) * max_tasks));
+
+  /* silc_realloc does not zero the newly added tail, but the fd scan
+     loops assume unused entries are zeroed (the original allocation used
+     silc_calloc) -- clear them so stale data is never taken for a
+     registered fd. */
+  memset(&schedule->fd_list[schedule->max_fd], 0,
+	 sizeof(*schedule->fd_list) * (max_tasks - schedule->max_fd));
+
+  schedule->max_fd = max_tasks;
+  silc_mutex_unlock(schedule->lock);
+  return TRUE;
+}
+
/* Stops the schedule even if it is not supposed to be stopped yet.
   After calling this, one should call silc_schedule_uninit (after the
   silc_schedule has returned). */
void silc_schedule_stop(SilcSchedule schedule)
{
  SILC_LOG_DEBUG(("Stopping scheduler"));
- silc_mutex_lock(schedule->lock);
+  /* NOTE(review): `valid' is now written without holding `schedule->lock'.
+     This presumes a plain bool store is atomic enough for the scheduler
+     loop to eventually observe the change (e.g. so stop can be called
+     from contexts where taking the lock is unsafe) -- confirm this
+     unlocked write is intentional. */
  schedule->valid = FALSE;
- silc_mutex_unlock(schedule->lock);
}
/* Executes nontimeout tasks. It then checks whether any of ther fd tasks
{
SilcTask task;
int i, last_fd = schedule->last_fd;
+ SilcUInt32 fd;
for (i = 0; i <= last_fd; i++) {
if (schedule->fd_list[i].events == 0)
continue;
+ fd = schedule->fd_list[i].fd;
+
/* First check whether this fd has task in the fd queue */
silc_mutex_lock(schedule->fd_queue->lock);
- task = silc_task_find(schedule->fd_queue, schedule->fd_list[i].fd);
+ task = silc_task_find(schedule->fd_queue, fd);
/* If the task was found then execute its callbacks. If not then
execute all generic tasks for that fd. */
if (task->valid && schedule->fd_list[i].revents & SILC_TASK_READ) {
silc_mutex_unlock(schedule->generic_queue->lock);
silc_mutex_unlock(schedule->lock);
- task->callback(schedule, SILC_TASK_READ, schedule->fd_list[i].fd,
- task->context);
+ task->callback(schedule, SILC_TASK_READ, fd, task->context);
silc_mutex_lock(schedule->lock);
silc_mutex_lock(schedule->generic_queue->lock);
}
if (task->valid && schedule->fd_list[i].revents & SILC_TASK_WRITE) {
silc_mutex_unlock(schedule->generic_queue->lock);
silc_mutex_unlock(schedule->lock);
- task->callback(schedule, SILC_TASK_WRITE, schedule->fd_list[i].fd,
- task->context);
+ task->callback(schedule, SILC_TASK_WRITE, fd, task->context);
silc_mutex_lock(schedule->lock);
silc_mutex_lock(schedule->generic_queue->lock);
}
/* Add new task to the scheduler */
-SilcTask silc_schedule_task_add(SilcSchedule schedule, uint32 fd,
+SilcTask silc_schedule_task_add(SilcSchedule schedule, SilcUInt32 fd,
SilcTaskCallback callback, void *context,
long seconds, long useconds,
SilcTaskType type,
/* Remove task by fd */
-void silc_schedule_task_del_by_fd(SilcSchedule schedule, uint32 fd)
+void silc_schedule_task_del_by_fd(SilcSchedule schedule, SilcUInt32 fd)
{
- SILC_LOG_DEBUG(("Unregister task by fd"));
+ SILC_LOG_DEBUG(("Unregister task by fd %d", fd));
silc_task_del_by_fd(schedule->timeout_queue, fd);
silc_task_del_by_fd(schedule->fd_queue, fd);
one file descriptor to set different iomasks. */
void silc_schedule_set_listen_fd(SilcSchedule schedule,
- uint32 fd, SilcTaskEvent iomask)
+ SilcUInt32 fd, SilcTaskEvent iomask)
{
int i;
bool found = FALSE;
/* Removes a file descriptor from listen list. */
-void silc_schedule_unset_listen_fd(SilcSchedule schedule, uint32 fd)
+void silc_schedule_unset_listen_fd(SilcSchedule schedule, SilcUInt32 fd)
{
int i;
silc_mutex_lock(schedule->lock);
+ SILC_LOG_DEBUG(("Unset listen fd %d", fd));
+
for (i = 0; i < schedule->max_fd; i++)
if (schedule->fd_list[i].fd == fd) {
schedule->fd_list[i].fd = 0;
/* Return task by its fd. */
-static SilcTask silc_task_find(SilcTaskQueue queue, uint32 fd)
+static SilcTask silc_task_find(SilcTaskQueue queue, SilcUInt32 fd)
{
SilcTask next;
return FALSE;
}
-static void silc_task_del_by_fd(SilcTaskQueue queue, uint32 fd)
+static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd)
{
SilcTask next;