Added various branch prediction optimizations.
/* Allocate new SilcBuffer */
sb = (SilcBuffer)silc_calloc(1, sizeof(*sb));
- if (!sb)
+ if (silc_unlikely(!sb))
return NULL;
- if (len) {
+ if (silc_likely(len)) {
/* Allocate the actual data area */
sb->head = (unsigned char *)silc_calloc(len, sizeof(*sb->head));
- if (!sb->head)
+ if (silc_unlikely(!sb->head))
return NULL;
/* Set pointers to the new buffer */
#if defined(SILC_DEBUG)
SILC_ASSERT(len <= silc_buffer_len(sb));
#else
- if (len > silc_buffer_len(sb))
+ if (silc_unlikely(len > silc_buffer_len(sb)))
return NULL;
#endif
sb->data += len;
#if defined(SILC_DEBUG)
SILC_ASSERT((sb->data - len) >= sb->head);
#else
- if ((sb->data - len) < sb->head)
+ if (silc_unlikely((sb->data - len) < sb->head))
return NULL;
#endif
sb->data -= len;
#if defined(SILC_DEBUG)
SILC_ASSERT(len <= silc_buffer_taillen(sb));
#else
- if (len > silc_buffer_taillen(sb))
+ if (silc_unlikely(len > silc_buffer_taillen(sb)))
return NULL;
#endif
sb->tail += len;
#if defined(SILC_DEBUG)
SILC_ASSERT((sb->tail - len) >= sb->data);
#else
- if ((sb->tail - len) < sb->data)
+ if (silc_unlikely((sb->tail - len) < sb->data))
return NULL;
#endif
sb->tail -= len;
#if defined(SILC_DEBUG)
SILC_ASSERT(len <= silc_buffer_headlen(sb));
#else
- if (len > silc_buffer_headlen(sb))
+ if (silc_unlikely(len > silc_buffer_headlen(sb)))
return NULL;
#endif
return (unsigned char *)memcpy(sb->head, data, len);
#if defined(SILC_DEBUG)
SILC_ASSERT(len <= silc_buffer_len(sb));
#else
- if (len > silc_buffer_len(sb))
+ if (silc_unlikely(len > silc_buffer_len(sb)))
return NULL;
#endif
return (unsigned char *)memcpy(sb->data, data, len);
#if defined(SILC_DEBUG)
SILC_ASSERT(len <= silc_buffer_taillen(sb));
#else
- if (len > silc_buffer_taillen(sb))
+ if (silc_unlikely(len > silc_buffer_taillen(sb)))
return NULL;
#endif
return (unsigned char *)memcpy(sb->tail, data, len);
SilcBuffer silc_buffer_alloc_size(SilcUInt32 len)
{
SilcBuffer sb = silc_buffer_alloc(len);
- if (!sb)
+ if (silc_unlikely(!sb))
return NULL;
silc_buffer_pull_tail(sb, len);
return sb;
SilcBuffer sb_new;
sb_new = silc_buffer_alloc_size(silc_buffer_len(sb));
- if (!sb_new)
+ if (silc_unlikely(!sb_new))
return NULL;
silc_buffer_put(sb_new, sb->data, silc_buffer_len(sb));
SilcBuffer sb_new;
sb_new = silc_buffer_alloc_size(silc_buffer_truelen(sb));
- if (!sb_new)
+ if (silc_unlikely(!sb_new))
return NULL;
silc_buffer_put(sb_new, sb->head, silc_buffer_truelen(sb));
sb_new->data = sb_new->head + silc_buffer_headlen(sb);
if (!sb)
return silc_buffer_alloc(newsize);
- if (newsize <= silc_buffer_truelen(sb))
+ if (silc_unlikely(newsize <= silc_buffer_truelen(sb)))
return sb;
hlen = silc_buffer_headlen(sb);
dlen = silc_buffer_len(sb);
h = (unsigned char *)silc_realloc(sb->head, newsize);
- if (!h)
+ if (silc_unlikely(!h))
return NULL;
sb->head = h;
sb->data = sb->head + hlen;
SilcBuffer silc_buffer_realloc_size(SilcBuffer sb, SilcUInt32 newsize)
{
sb = silc_buffer_realloc(sb, newsize);
- if (!sb)
+ if (silc_unlikely(!sb))
return NULL;
silc_buffer_pull_tail(sb, silc_buffer_taillen(sb));
return sb;
{
if (size > silc_buffer_len(sb)) {
if (size > silc_buffer_taillen(sb) + silc_buffer_len(sb))
- if (!silc_buffer_realloc(sb, silc_buffer_truelen(sb) +
- (size - silc_buffer_taillen(sb) -
- silc_buffer_len(sb))))
+ if (silc_unlikely(!silc_buffer_realloc(sb, silc_buffer_truelen(sb) +
+ (size - silc_buffer_taillen(sb) -
+ silc_buffer_len(sb)))))
return FALSE;
silc_buffer_pull_tail(sb, size - silc_buffer_len(sb));
}
/* Allocate new SilcBuffer */
sb = (SilcBuffer)silc_scalloc(stack, 1, sizeof(*sb));
- if (!sb)
+ if (silc_unlikely(!sb))
return NULL;
/* Allocate the actual data area */
sb->head = (unsigned char *)silc_smalloc_ua(stack, len);
- if (!sb->head)
+ if (silc_unlikely(!sb->head))
return NULL;
/* Set pointers to the new buffer */
SilcBuffer silc_buffer_salloc_size(SilcStack stack, SilcUInt32 len)
{
SilcBuffer sb = silc_buffer_salloc(stack, len);
- if (!sb)
+ if (silc_unlikely(!sb))
return NULL;
silc_buffer_pull_tail(sb, len);
return sb;
/* Do a slow and stack-wasting realloc. The old sb->head is lost and
will be freed eventually. */
h = silc_smalloc_ua(stack, newsize);
- if (!h)
+ if (silc_unlikely(!h))
return NULL;
memcpy(h, sb->head, silc_buffer_truelen(sb));
}
SilcBuffer sb, SilcUInt32 newsize)
{
sb = silc_buffer_srealloc(stack, sb, newsize);
- if (!sb)
+ if (silc_unlikely(!sb))
return NULL;
silc_buffer_pull_tail(sb, silc_buffer_taillen(sb));
return sb;
{
if (size > silc_buffer_len(sb)) {
if (size > silc_buffer_taillen(sb) + silc_buffer_len(sb))
- if (!silc_buffer_srealloc(stack, sb, silc_buffer_truelen(sb) +
- (size - silc_buffer_taillen(sb) -
- silc_buffer_len(sb))))
+ if (silc_unlikely(!silc_buffer_srealloc(stack, sb,
+ silc_buffer_truelen(sb) +
+ (size - silc_buffer_taillen(sb) -
+ silc_buffer_len(sb)))))
return FALSE;
silc_buffer_pull_tail(sb, size - silc_buffer_len(sb));
}
SilcBuffer sb_new;
sb_new = silc_buffer_salloc_size(stack, silc_buffer_len(sb));
- if (!sb_new)
+ if (silc_unlikely(!sb_new))
return NULL;
silc_buffer_put(sb_new, sb->data, silc_buffer_len(sb));
SilcBuffer sb_new;
sb_new = silc_buffer_salloc_size(stack, silc_buffer_truelen(sb));
- if (!sb_new)
+ if (silc_unlikely(!sb_new))
return NULL;
silc_buffer_put(sb_new, sb->head, silc_buffer_truelen(sb));
sb_new->data = sb_new->head + silc_buffer_headlen(sb);
/* Check that buffer has enough room to format data in it, if not
allocate more. */
-#define FORMAT_HAS_SPACE(s, b, req) \
-do { \
- if (!silc_buffer_senlarge(s, b, req)) \
- goto fail; \
- flen += req; \
+#define FORMAT_HAS_SPACE(s, b, req) \
+do { \
+ if (silc_unlikely(!silc_buffer_senlarge(s, b, req))) \
+ goto fail; \
+ flen += req; \
} while(0)
/* Check that there is data to be unformatted */
#define UNFORMAT_HAS_SPACE(b, req) \
do { \
- if (req > silc_buffer_len(b)) \
+ if (silc_unlikely(req > silc_buffer_len(b))) \
goto fail; \
- if ((req + 1) <= 0) \
+ if (silc_unlikely((req + 1) <= 0)) \
goto fail; \
} while(0)
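For context, a minimal round-trip sketch of the formatter these checks guard, assuming the standard silc_buffer_format/silc_buffer_unformat calls and SILC_STR_* macros; the helper function itself is illustrative and not part of this patch:
/* Illustrative only: FORMAT_HAS_SPACE and UNFORMAT_HAS_SPACE above are the
   internal guards behind silc_buffer_format and silc_buffer_unformat. */
static SilcBool roundtrip_example(void)
{
  SilcBufferStruct buf;
  SilcUInt32 num = 0;
  SilcUInt16 val = 0;

  memset(&buf, 0, sizeof(buf));

  /* Encode a 32-bit and a 16-bit integer; the formatter enlarges the
     destination as needed (the FORMAT_HAS_SPACE path). */
  if (silc_buffer_format(&buf,
                         SILC_STR_UI_INT(42),
                         SILC_STR_UI_SHORT(7),
                         SILC_STR_END) < 0)
    return FALSE;

  /* Decode the same layout; each read is guarded by UNFORMAT_HAS_SPACE. */
  if (silc_buffer_unformat(&buf,
                           SILC_STR_UI_INT(&num),
                           SILC_STR_UI_SHORT(&val),
                           SILC_STR_END) < 0) {
    silc_free(buf.head);
    return FALSE;
  }

  silc_free(buf.head);
  return num == 42 && val == 7;
}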
unsigned char **x = va_arg(ap, unsigned char **);
SilcUInt32 len2 = va_arg(ap, SilcUInt32);
UNFORMAT_HAS_SPACE(src, len2);
- if (len2 && x)
+ if (silc_likely(len2 && x))
*x = src->data;
silc_buffer_pull(src, len2);
break;
unsigned char **x = va_arg(ap, unsigned char **);
SilcUInt32 len2 = va_arg(ap, SilcUInt32);
UNFORMAT_HAS_SPACE(src, len2);
- if (len2 && x) {
+ if (silc_likely(len2 && x)) {
*x = silc_scalloc(stack, len2 + 1, sizeof(unsigned char));
memcpy(*x, src->data, len2);
}
{
unsigned char *x = va_arg(ap, unsigned char *);
UNFORMAT_HAS_SPACE(src, 1);
- if (x)
+ if (silc_likely(x))
*x = src->data[0];
silc_buffer_pull(src, 1);
break;
{
SilcUInt16 *x = va_arg(ap, SilcUInt16 *);
UNFORMAT_HAS_SPACE(src, 2);
- if (x)
+ if (silc_likely(x))
SILC_GET16_MSB(*x, src->data);
silc_buffer_pull(src, 2);
break;
{
SilcUInt32 *x = va_arg(ap, SilcUInt32 *);
UNFORMAT_HAS_SPACE(src, 4);
- if (x)
+ if (silc_likely(x))
SILC_GET32_MSB(*x, src->data);
silc_buffer_pull(src, 4);
break;
{
SilcUInt64 *x = va_arg(ap, SilcUInt64 *);
UNFORMAT_HAS_SPACE(src, sizeof(SilcUInt64));
- if (x)
+ if (silc_likely(x))
SILC_GET64_MSB(*x, src->data);
silc_buffer_pull(src, sizeof(SilcUInt64));
break;
{
char *x = va_arg(ap, char *);
UNFORMAT_HAS_SPACE(src, 1);
- if (x)
+ if (silc_likely(x))
*x = src->data[0];
silc_buffer_pull(src, 1);
break;
{
SilcInt16 *x = va_arg(ap, SilcInt16 *);
UNFORMAT_HAS_SPACE(src, 2);
- if (x)
+ if (silc_likely(x))
SILC_GET16_MSB(*x, src->data);
silc_buffer_pull(src, 2);
break;
{
SilcInt32 *x = va_arg(ap, SilcInt32 *);
UNFORMAT_HAS_SPACE(src, 4);
- if (x)
+ if (silc_likely(x))
SILC_GET32_MSB(*x, src->data);
silc_buffer_pull(src, 4);
break;
{
SilcInt64 *x = va_arg(ap, SilcInt64 *);
UNFORMAT_HAS_SPACE(src, sizeof(SilcInt64));
- if (x)
+ if (silc_likely(x))
SILC_GET64_MSB(*x, src->data);
silc_buffer_pull(src, sizeof(SilcInt64));
break;
len2 = (SilcUInt8)src->data[0];
silc_buffer_pull(src, 1);
UNFORMAT_HAS_SPACE(src, len2);
- if (x)
+ if (silc_likely(x))
*x = src->data;
silc_buffer_pull(src, len2);
break;
SILC_GET16_MSB(len2, src->data);
silc_buffer_pull(src, 2);
UNFORMAT_HAS_SPACE(src, len2);
- if (x)
+ if (silc_likely(x))
*x = src->data;
silc_buffer_pull(src, len2);
break;
len2 = (SilcUInt8)src->data[0];
silc_buffer_pull(src, 1);
UNFORMAT_HAS_SPACE(src, len2);
- if (x && len2) {
+ if (silc_likely(x && len2)) {
*x = silc_scalloc(stack, len2 + 1, sizeof(unsigned char));
memcpy(*x, src->data, len2);
}
SILC_GET16_MSB(len2, src->data);
silc_buffer_pull(src, 2);
UNFORMAT_HAS_SPACE(src, len2);
- if (x && len2) {
+ if (silc_likely(x && len2)) {
*x = silc_scalloc(stack, len2 + 1, sizeof(unsigned char));
memcpy(*x, src->data, len2);
}
SILC_GET32_MSB(len2, src->data);
silc_buffer_pull(src, 4);
UNFORMAT_HAS_SPACE(src, len2);
- if (x)
+ if (silc_likely(x))
*x = src->data;
silc_buffer_pull(src, len2);
break;
SILC_GET32_MSB(len2, src->data);
silc_buffer_pull(src, 4);
UNFORMAT_HAS_SPACE(src, len2);
- if (x && len2) {
+ if (silc_likely(x && len2)) {
*x = silc_scalloc(stack, len2 + 1, sizeof(unsigned char));
memcpy(*x, src->data, len2);
}
slen = strlen(string);
d = silc_realloc(dst->head, sizeof(*dst->head) * (slen + len + 1));
- if (!d)
+ if (silc_unlikely(!d))
return -1;
dst->head = d;
memcpy(dst->head + len, string, slen);
slen = strlen(string);
d = silc_srealloc_ua(stack, len + 1, dst->head,
sizeof(*dst->head) * (slen + len + 1));
- if (!d)
+ if (silc_unlikely(!d))
return -1;
dst->head = d;
memcpy(dst->head + len, string, slen);
Author: Pekka Riikonen <priikone@silcnet.org>
- Copyright (C) 2000 - 2005 Pekka Riikonen
+ Copyright (C) 2000 - 2006 Pekka Riikonen
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
* will automatically allocate list entries. Normal SILC List API cannot
* be used for this purpose because in that case the context passed to the
* list must be defined as list structure already. This is not the case in
- * SilcDList. But SilcDList is a bit slower than SilcList because it
+ * SilcDList. But SilcDList is a bit slower than SilcList because it
* requires memory allocation when adding new entries to the list.
*
- * SILC Dynamic List is not thread-safe. If the same list context must be
+ * SILC Dynamic List is not thread-safe. If the same list context must be
* used in multithreaded environment concurrency control must be employed.
*
***/
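A short usage sketch of the API described above, assuming the standard silc_dlist_init/add/start/get/uninit calls (illustrative only, not part of this patch):
/* Illustrative SilcDList usage; each silc_dlist_add allocates a list entry
   internally, which is the cost mentioned above compared to SilcList. */
static void dlist_example(void)
{
  SilcDList list = silc_dlist_init();
  char *entry;

  if (!list)
    return;

  silc_dlist_add(list, "first");
  silc_dlist_add(list, "second");

  /* Traverse from the beginning */
  silc_dlist_start(list);
  while ((entry = silc_dlist_get(list)) != SILC_LIST_END)
    SILC_LOG_DEBUG(("entry: %s", entry));

  silc_dlist_uninit(list);
}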
SilcBool silc_dlist_add(SilcDList list, void *context)
{
SilcDListEntry e = (SilcDListEntry)silc_malloc(sizeof(*e));
- if (!e)
+ if (silc_unlikely(!e))
return FALSE;
e->context = context;
silc_list_add(list->list, e);
SilcBool silc_dlist_insert(SilcDList list, void *context)
{
SilcDListEntry e = (SilcDListEntry)silc_malloc(sizeof(*e));
- if (!e)
+ if (silc_unlikely(!e))
return FALSE;
e->context = context;
silc_list_insert(list->list, list->prev, e);
SilcFSM fsm;
fsm = silc_calloc(1, sizeof(*fsm));
- if (!fsm)
+ if (silc_unlikely(!fsm))
return NULL;
- if (!silc_fsm_init(fsm, fsm_context, destructor,
- destructor_context, schedule)) {
+ if (silc_unlikely(!silc_fsm_init(fsm, fsm_context, destructor,
+ destructor_context, schedule))) {
silc_free(fsm);
return NULL;
}
SilcFSMThread thread;
thread = silc_calloc(1, sizeof(*thread));
- if (!thread)
+ if (silc_unlikely(!thread))
return NULL;
silc_fsm_thread_init(thread, fsm, thread_context, destructor,
SilcFSMSema sema;
sema = silc_calloc(1, sizeof(*sema));
- if (!sema)
+ if (silc_unlikely(!sema))
return NULL;
silc_fsm_sema_init(sema, fsm, value);
}
p = silc_calloc(1, sizeof(*p));
- if (!p)
+ if (silc_unlikely(!p))
continue;
p->sema = sema;
p->fsm = fsm;
cannot be used in this thread. Application may still use it if it
wants but we use our own. */
fsm->schedule = silc_schedule_init(0, old);
- if (!fsm->schedule)
+ if (silc_unlikely(!fsm->schedule))
return NULL;
/* Start the FSM thread */
- if (!silc_schedule_task_add_timeout(fsm->schedule, silc_fsm_run, fsm, 0, 0))
+ if (silc_unlikely(!silc_schedule_task_add_timeout(fsm->schedule,
+ silc_fsm_run, fsm, 0, 0)))
return NULL;
/* Run the scheduler */
Author: Pekka Riikonen <priikone@silcnet.org>
- Copyright (C) 1999 - 2005 Pekka Riikonen
+ Copyright (C) 1999 - 2006 Pekka Riikonen
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
void *silc_malloc(size_t size)
{
void *addr;
- if (size <= 0 || size >= SILC_MAX_ALLOC) {
+ if (silc_unlikely(size <= 0 || size >= SILC_MAX_ALLOC)) {
SILC_LOG_ERROR(("Invalid memory allocation"));
return NULL;
}
addr = malloc(size);
- if (!addr)
+ if (silc_unlikely(!addr))
SILC_LOG_ERROR(("System out of memory"));
return addr;
}
void *silc_calloc(size_t items, size_t size)
{
void *addr;
- if (size * items <= 0 || size * items >= SILC_MAX_ALLOC) {
+ if (silc_unlikely(size * items <= 0 || size * items >= SILC_MAX_ALLOC)) {
SILC_LOG_ERROR(("Invalid memory allocation"));
return NULL;
}
addr = calloc(items, size);
- if (!addr)
+ if (silc_unlikely(!addr))
SILC_LOG_ERROR(("System out of memory"));
return addr;
}
void *silc_realloc(void *ptr, size_t size)
{
void *addr;
- if (size <= 0 || size >= SILC_MAX_ALLOC) {
+ if (silc_unlikely(size <= 0 || size >= SILC_MAX_ALLOC)) {
SILC_LOG_ERROR(("Invalid memory allocation"));
return NULL;
}
addr = realloc(ptr, size);
- if (!addr)
+ if (silc_unlikely(!addr))
SILC_LOG_ERROR(("System out of memory"));
return addr;
}
{
unsigned char *addr;
addr = silc_malloc(size + 1);
- if (!addr) {
+ if (silc_unlikely(!addr)) {
SILC_LOG_ERROR(("System out of memory"));
return NULL;
}
return silc_calloc(items, size);
addr = silc_stack_malloc(stack, items * size, TRUE);
- if (!addr)
+ if (silc_unlikely(!addr))
return NULL;
memset(addr, 0, items * size);
return (void *)addr;
return silc_memdup(ptr, size);
addr = silc_stack_malloc(stack, size + 1, TRUE);
- if (!addr)
+ if (silc_unlikely(!addr))
return NULL;
memcpy((void *)addr, ptr, size);
addr[size] = '\0';
return silc_memdup(str, size);
addr = silc_stack_malloc(stack, size + 1, FALSE);
- if (!addr)
+ if (silc_unlikely(!addr))
return NULL;
memcpy((void *)addr, str, size);
addr[size] = '\0';
while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
t = (SilcTask)task;
- if (!t->valid) {
+ if (silc_unlikely(!t->valid)) {
silc_schedule_task_remove(schedule, t);
continue;
}
- if (!task->events || !task->revents)
+ if (!task->revents || !task->events)
continue;
/* Is the task ready for reading */
}
/* Remove if task was invalidated in the task callback */
- if (!t->valid)
+ if (silc_unlikely(!t->valid))
silc_schedule_task_remove(schedule, t);
}
silc_hash_table_list_reset(&htl);
/* First task in the task queue has always the earliest timeout. */
silc_list_start(schedule->timeout_queue);
- while ((task = silc_list_get(schedule->timeout_queue)) != SILC_LIST_END) {
+ task = silc_list_get(schedule->timeout_queue);
+ do {
t = (SilcTask)task;
/* Remove invalid task */
- if (!t->valid) {
+ if (silc_unlikely(!t->valid)) {
silc_schedule_task_remove(schedule, t);
continue;
}
+ SILC_SCHEDULE_UNLOCK(schedule);
+
/* Execute the task if the timeout has expired */
- if (dispatch_all || silc_compare_timeval(&task->timeout, &curtime)) {
- t->valid = FALSE;
- SILC_SCHEDULE_UNLOCK(schedule);
- t->callback(schedule, schedule->app_context, SILC_TASK_EXPIRE, 0,
- t->context);
+ if (!silc_compare_timeval(&task->timeout, &curtime) && !dispatch_all) {
SILC_SCHEDULE_LOCK(schedule);
+ break;
+ }
- /* Remove the expired task */
- silc_schedule_task_remove(schedule, t);
+ t->valid = FALSE;
+ t->callback(schedule, schedule->app_context, SILC_TASK_EXPIRE, 0,
+ t->context);
- /* Balance when we have lots of small timeouts */
- if ((++count) > 40)
- break;
- }
- }
+ SILC_SCHEDULE_LOCK(schedule);
+
+ /* Remove the expired task */
+ silc_schedule_task_remove(schedule, t);
+
+ /* Balance when we have lots of small timeouts */
+ if (silc_unlikely((++count) > 40))
+ break;
+ } while (silc_likely((task = silc_list_get(schedule->timeout_queue))));
}
/* Calculates next timeout. This is the timeout value when at earliest some
/* First task in the task queue has always the earliest timeout. */
silc_list_start(schedule->timeout_queue);
- while ((task = silc_list_get(schedule->timeout_queue)) != SILC_LIST_END) {
+ task = silc_list_get(schedule->timeout_queue);
+ do {
t = (SilcTask)task;
/* Remove invalid task */
- if (!t->valid) {
+ if (silc_unlikely(!t->valid)) {
silc_schedule_task_remove(schedule, t);
continue;
}
timeout tasks from the past. */
if (silc_compare_timeval(&task->timeout, &curtime) && dispatch) {
silc_schedule_dispatch_timeout(schedule, FALSE);
- if (!schedule->valid)
+ if (silc_unlikely(!schedule->valid))
return;
/* Start selecting new timeout again after dispatch */
curtime.tv_sec = 0;
curtime.tv_usec += 1000000L;
}
-
break;
- }
+ } while ((task = silc_list_get(schedule->timeout_queue)));
/* Save the timeout */
if (task) {
static void silc_schedule_task_remove(SilcSchedule schedule, SilcTask task)
{
SilcTaskFd ftask;
- SilcTaskTimeout ttask;
- if (task == SILC_ALL_TASKS) {
+ if (silc_unlikely(task == SILC_ALL_TASKS)) {
SilcTask task;
SilcHashTableList htl;
SilcUInt32 fd;
return;
}
- /* Delete from timeout queue */
- if (task->type == 1) {
- silc_list_start(schedule->timeout_queue);
- while ((ttask = silc_list_get(schedule->timeout_queue)) != SILC_LIST_END) {
- if (ttask == (SilcTaskTimeout)task) {
- silc_list_del(schedule->timeout_queue, ttask);
-
- /* Put to free list */
- silc_list_add(schedule->free_tasks, ttask);
- break;
- }
- }
+ if (silc_likely(task->type == 1)) {
+ /* Delete from timeout queue */
+ silc_list_del(schedule->timeout_queue, task);
- return;
+ /* Put to free list */
+ silc_list_add(schedule->free_tasks, task);
+ } else {
+ /* Delete from fd queue */
+ ftask = (SilcTaskFd)task;
+ silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(ftask->fd));
}
-
- /* Delete from fd queue */
- ftask = (SilcTaskFd)task;
- silc_hash_table_del(schedule->fd_queue, SILC_32_TO_PTR(ftask->fd));
}
/* Timeout freelist garbage collection */
SILC_SCHEDULE_UNLOCK(schedule);
}
-/* Runs the scheduler once and then returns. */
+/* Runs the scheduler once and then returns. Must be called locked. */
-SilcBool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
+static SilcBool silc_schedule_iterate(SilcSchedule schedule, int timeout_usecs)
{
struct timeval timeout;
int ret;
- if (!schedule->is_locked)
- SILC_SCHEDULE_LOCK(schedule);
-
do {
SILC_LOG_DEBUG(("In scheduler loop"));
/* Deliver signals if any has been set to be called */
- if (schedule->signal_tasks) {
+ if (silc_unlikely(schedule->signal_tasks)) {
SILC_SCHEDULE_UNLOCK(schedule);
schedule_ops.signals_call(schedule, schedule->internal);
schedule->signal_tasks = FALSE;
}
/* Check if scheduler is valid */
- if (schedule->valid == FALSE) {
+ if (silc_unlikely(schedule->valid == FALSE)) {
SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
- if (!schedule->is_locked)
- SILC_SCHEDULE_UNLOCK(schedule);
return FALSE;
}
silc_schedule_select_timeout(schedule);
/* Check if scheduler is valid */
- if (schedule->valid == FALSE) {
+ if (silc_unlikely(schedule->valid == FALSE)) {
SILC_LOG_DEBUG(("Scheduler not valid anymore, exiting"));
- if (!schedule->is_locked)
- SILC_SCHEDULE_UNLOCK(schedule);
return FALSE;
}
SILC_LOG_DEBUG(("Select"));
ret = schedule_ops.select(schedule, schedule->internal);
- switch (ret) {
- case 0:
+ if (silc_likely(ret == 0)) {
/* Timeout */
SILC_LOG_DEBUG(("Running timeout tasks"));
- if (silc_list_count(schedule->timeout_queue))
+ if (silc_likely(silc_list_count(schedule->timeout_queue)))
silc_schedule_dispatch_timeout(schedule, FALSE);
- break;
- case -1:
- /* Error */
- if (errno == EINTR)
- break;
- SILC_LOG_ERROR(("Error in select(): %s", strerror(errno)));
- break;
- default:
+ continue;
+
+ } else if (silc_likely(ret > 0)) {
/* There is some data available now */
SILC_LOG_DEBUG(("Running fd tasks"));
silc_schedule_dispatch_fd(schedule);
- break;
+ continue;
+
+ } else {
+ /* Error */
+ if (silc_likely(errno == EINTR))
+ continue;
+ SILC_LOG_ERROR(("Error in select()/poll(): %s", strerror(errno)));
+ continue;
}
} while (timeout_usecs == -1);
- if (!schedule->is_locked)
- SILC_SCHEDULE_UNLOCK(schedule);
-
return TRUE;
}
-/* The SILC scheduler. This is actually the main routine in SILC programs.
- When this returns the program is to be ended. Before this function can
- be called, one must call silc_schedule_init function. */
+/* Runs the scheduler once and then returns. */
+
+SilcBool silc_schedule_one(SilcSchedule schedule, int timeout_usecs)
+{
+ SilcBool ret;
+ SILC_SCHEDULE_LOCK(schedule);
+ ret = silc_schedule_iterate(schedule, timeout_usecs);
+ SILC_SCHEDULE_UNLOCK(schedule);
+ return ret;
+}
+
+/* Runs the scheduler and blocks here. When this returns the scheduler
+ has ended. */
void silc_schedule(SilcSchedule schedule)
{
SILC_LOG_DEBUG(("Running scheduler"));
- if (schedule->valid == FALSE) {
- SILC_LOG_ERROR(("Scheduler is not valid, stopping"));
- return;
- }
-
/* Start the scheduler loop */
SILC_SCHEDULE_LOCK(schedule);
- schedule->is_locked = TRUE;
- silc_schedule_one(schedule, -1);
+ silc_schedule_iterate(schedule, -1);
SILC_SCHEDULE_UNLOCK(schedule);
}
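To show the intended difference between the blocking silc_schedule and the one-iteration silc_schedule_one, here is a hedged usage sketch; the SILC_TASK_CALLBACK shape and the silc_schedule_stop/silc_schedule_uninit calls are assumed from the existing scheduler API, and the function names below are only illustrative:
/* Illustrative only: drive the scheduler with one timeout task.
   silc_schedule() blocks until silc_schedule_stop() is called;
   silc_schedule_one() runs a single iteration instead. */
SILC_TASK_CALLBACK(example_timeout)
{
  SILC_LOG_DEBUG(("Timeout fired, stopping scheduler"));
  silc_schedule_stop(schedule);
}

static void scheduler_example(void)
{
  SilcSchedule schedule = silc_schedule_init(0, NULL);
  if (!schedule)
    return;

  /* Fire example_timeout after one second */
  silc_schedule_task_add_timeout(schedule, example_timeout, NULL, 1, 0);

  /* Block here; example_timeout stops the scheduler when it runs.
     Alternatively, call silc_schedule_one(schedule, -1) per iteration. */
  silc_schedule(schedule);

  silc_schedule_uninit(schedule);
}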
{
SilcTask task = NULL;
- if (!schedule->valid)
+ if (silc_unlikely(!schedule->valid))
return NULL;
SILC_SCHEDULE_LOCK(schedule);
- if (type == SILC_TASK_TIMEOUT) {
+ if (silc_likely(type == SILC_TASK_TIMEOUT)) {
SilcTaskTimeout tmp, prev, ttask;
+ SilcList list;
silc_list_start(schedule->free_tasks);
ttask = silc_list_get(schedule->free_tasks);
- if (!ttask) {
+ if (silc_unlikely(!ttask)) {
ttask = silc_calloc(1, sizeof(*ttask));
- if (!ttask)
+ if (silc_unlikely(!ttask))
goto out;
}
silc_list_del(schedule->free_tasks, ttask);
/* Add task to correct spot so that the first task in the list has
the earliest timeout. */
- silc_list_start(schedule->timeout_queue);
+ list = schedule->timeout_queue;
+ silc_list_start(list);
prev = NULL;
- while ((tmp = silc_list_get(schedule->timeout_queue)) != SILC_LIST_END) {
+ while ((tmp = silc_list_get(list)) != SILC_LIST_END) {
/* If we have shorter timeout, we have found our spot */
if (silc_compare_timeval(&ttask->timeout, &tmp->timeout)) {
silc_list_insert(schedule->timeout_queue, prev, ttask);
task = (SilcTask)ttask;
- } else if (type == SILC_TASK_FD) {
+ } else if (silc_likely(type == SILC_TASK_FD)) {
/* Check if fd is already added */
- if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
- NULL, (void **)&task))
+ if (silc_unlikely(silc_hash_table_find(schedule->fd_queue,
+ SILC_32_TO_PTR(fd),
+ NULL, (void **)&task)))
goto out;
/* Check max tasks */
- if (schedule->max_tasks > 0 &&
- silc_hash_table_count(schedule->fd_queue) >= schedule->max_tasks) {
+ if (silc_unlikely(schedule->max_tasks > 0 &&
+ silc_hash_table_count(schedule->fd_queue) >=
+ schedule->max_tasks)) {
SILC_LOG_WARNING(("Scheduler task limit reached: cannot add new task"));
goto out;
}
SilcTaskFd ftask = silc_calloc(1, sizeof(*ftask));
- if (!ftask)
+ if (silc_unlikely(!ftask))
goto out;
SILC_LOG_DEBUG(("New fd task %p fd=%d", ftask, fd));
task = (SilcTask)ftask;
- } else if (type == SILC_TASK_SIGNAL) {
+ } else if (silc_unlikely(type == SILC_TASK_SIGNAL)) {
SILC_SCHEDULE_UNLOCK(schedule);
- schedule_ops.signal_register(schedule, schedule->internal, (int)fd,
+ schedule_ops.signal_register(schedule, schedule->internal, fd,
callback, context);
return NULL;
}
-
out:
SILC_SCHEDULE_UNLOCK(schedule);
return task;
void silc_schedule_task_del(SilcSchedule schedule, SilcTask task)
{
- if (task == SILC_ALL_TASKS) {
+ if (silc_unlikely(task == SILC_ALL_TASKS)) {
SilcHashTableList htl;
SILC_LOG_DEBUG(("Unregister all tasks"));
SILC_SCHEDULE_LOCK(schedule);
/* fd is unique, so there is only one task with this fd in the table */
- if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd), NULL,
- (void **)&task))
+ if (silc_likely(silc_hash_table_find(schedule->fd_queue,
+ SILC_32_TO_PTR(fd), NULL,
+ (void **)&task)))
task->valid = FALSE;
SILC_SCHEDULE_UNLOCK(schedule);
/* If it is signal, remove it */
- if (!task)
+ if (silc_unlikely(!task))
schedule_ops.signal_unregister(schedule, schedule->internal, fd);
}
{
SilcTask task;
SilcHashTableList htl;
+ SilcList list;
SILC_LOG_DEBUG(("Unregister task by callback"));
silc_hash_table_list_reset(&htl);
/* Delete from timeout queue */
- silc_list_start(schedule->timeout_queue);
- while ((task = (SilcTask)silc_list_get(schedule->timeout_queue))
- != SILC_LIST_END) {
+ list = schedule->timeout_queue;
+ silc_list_start(list);
+ while ((task = (SilcTask)silc_list_get(list))) {
if (task->callback == callback)
task->valid = FALSE;
}
{
SilcTask task;
SilcHashTableList htl;
+ SilcList list;
SILC_LOG_DEBUG(("Unregister task by context"));
silc_hash_table_list_reset(&htl);
/* Delete from timeout queue */
- silc_list_start(schedule->timeout_queue);
- while ((task = (SilcTask)silc_list_get(schedule->timeout_queue))
- != SILC_LIST_END) {
+ list = schedule->timeout_queue;
+ silc_list_start(list);
+ while ((task = (SilcTask)silc_list_get(list))) {
if (task->context == context)
task->valid = FALSE;
}
SilcTaskCallback callback, void *context)
{
SilcTask task;
+ SilcList list;
SILC_LOG_DEBUG(("Unregister task by fd, callback and context"));
SILC_SCHEDULE_LOCK(schedule);
/* Delete from timeout queue */
- silc_list_start(schedule->timeout_queue);
- while ((task = (SilcTask)silc_list_get(schedule->timeout_queue))
- != SILC_LIST_END) {
+ list = schedule->timeout_queue;
+ silc_list_start(list);
+ while ((task = (SilcTask)silc_list_get(list))) {
if (task->callback == callback && task->context == context)
task->valid = FALSE;
}
{
SilcTaskFd task;
- if (!schedule->valid)
+ if (silc_unlikely(!schedule->valid))
return;
SILC_SCHEDULE_LOCK(schedule);
if (silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fd),
NULL, (void **)&task)) {
task->events = mask;
- if (send_events) {
+ if (silc_unlikely(send_events)) {
task->revents = mask;
silc_schedule_dispatch_fd(schedule);
}
* the signal call silc_schedule_task_del_by_fd.
*
***/
-#define silc_schedule_task_add_signal(schedule, signal, callback, context) \
- silc_schedule_task_add(schedule, signal, callback, context, 0, 0, \
+#define silc_schedule_task_add_signal(schedule, sig, callback, context) \
+ silc_schedule_task_add(schedule, sig, callback, context, 0, 0, \
SILC_TASK_SIGNAL)
/****f* silcutil/SilcScheduleAPI/silc_schedule_task_del
unsigned int max_tasks : 28; /* Max FD tasks */
unsigned int has_timeout : 1; /* Set if timeout is set */
unsigned int valid : 1; /* Set if scheduler is valid */
- unsigned int is_locked : 1; /* Set if scheduler is locked */
unsigned int signal_tasks : 1; /* Set if to dispatch signals */
};
{
SilcSocketStream stream = context;
- if (!stream->notifier)
+ if (silc_unlikely(!stream->notifier))
return;
switch (type) {
Author: Pekka Riikonen <priikone@silcnet.org>
- Copyright (C) 2003 - 2005 Pekka Riikonen
+ Copyright (C) 2003 - 2006 Pekka Riikonen
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
SILC_ST_DEBUG(("Allocating %d bytes (%s) from %p",
size, aligned ? "align" : "not align", stack));
- if (!size) {
+ if (silc_unlikely(!size)) {
SILC_LOG_ERROR(("Allocation by zero (0)"));
SILC_STACK_STAT(stack, num_errors, 1);
return NULL;
}
- if (size > SILC_STACK_MAX_ALLOC) {
+ if (silc_unlikely(size > SILC_STACK_MAX_ALLOC)) {
SILC_LOG_ERROR(("Allocating too much"));
SILC_STACK_STAT(stack, num_errors, 1);
return NULL;
bsize2 <<= 1;
si++;
}
- if (si >= SILC_STACK_BLOCK_NUM) {
+ if (silc_unlikely(si >= SILC_STACK_BLOCK_NUM)) {
SILC_LOG_ERROR(("Allocating too large block"));
SILC_STACK_STAT(stack, num_errors, 1);
return NULL;
stack->stack[si] = silc_malloc(bsize2 +
SILC_STACK_ALIGN(sizeof(**stack->stack),
SILC_STACK_DEFAULT_ALIGN));
- if (!stack->stack[si]) {
+ if (silc_unlikely(!stack->stack[si])) {
SILC_STACK_STAT(stack, num_errors, 1);
return NULL;
}
SILC_ST_DEBUG(("Reallocating %d bytes (%d) (%s) from %p", size, old_size,
aligned ? "align" : "not align", stack));
- if (!size || !old_size) {
+ if (silc_unlikely(!size || !old_size)) {
SILC_LOG_ERROR(("Allocation by zero (0)"));
SILC_STACK_STAT(stack, num_errors, 1);
return NULL;
}
- if (size > SILC_STACK_MAX_ALLOC) {
+ if (silc_unlikely(size > SILC_STACK_MAX_ALLOC)) {
SILC_LOG_ERROR(("Allocating too much"));
SILC_STACK_STAT(stack, num_errors, 1);
return NULL;
#endif
/***/
+/* Our offsetof macro */
#define silc_offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+/* silc_likely and silc_unlikely GCC branch prediction macros. Use only if
+ you have profiled the code first. */
+#if __GNUC__ >= 3
+#define silc_likely(expr) __builtin_expect(!!(expr), 1)
+#define silc_unlikely(expr) __builtin_expect(!!(expr), 0)
+#else
+#define silc_likely(expr) (expr)
+#define silc_unlikely(expr) (expr)
+#endif /* __GNUC__ >= 3 */
+
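A minimal sketch of the intended use, mirroring the changes in this patch (the helper function below is illustrative, not part of the toolkit):
/* Illustrative only: hint the compiler that the error paths are cold.
   On GCC 3 or newer the macros expand to __builtin_expect(); on other
   compilers they are no-ops, so the code remains portable. */
static unsigned char *example_dup(const unsigned char *data, SilcUInt32 len)
{
  unsigned char *p;

  /* Callers normally pass non-zero length; hint the zero case as rare. */
  if (silc_unlikely(!len))
    return NULL;

  p = silc_malloc(len);

  /* Out of memory is the cold path; keep the common path fall-through. */
  if (silc_unlikely(!p))
    return NULL;

  memcpy(p, data, len);
  return p;
}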
#if SILC_SIZEOF_SHORT > 2
#error "size of the short must be 2 bytes"
#endif
*/
#define SILC_GET16_MSB(l, cp) \
do { \
- (l) = ((SilcUInt32)(SilcUInt8)(cp)[0] << 8) \
- | ((SilcUInt32)(SilcUInt8)(cp)[1]); \
+ (l) = ((SilcUInt32)(SilcUInt8)(cp)[0] << 8) \
+ | ((SilcUInt32)(SilcUInt8)(cp)[1]); \
} while(0)
/***/
*/
#define SILC_GET32_MSB(l, cp) \
do { \
- (l) = ((SilcUInt32)(SilcUInt8)(cp)[0]) << 24 \
- | ((SilcUInt32)(SilcUInt8)(cp)[1] << 16) \
- | ((SilcUInt32)(SilcUInt8)(cp)[2] << 8) \
- | ((SilcUInt32)(SilcUInt8)(cp)[3]); \
+ (l) = ((SilcUInt32)(SilcUInt8)(cp)[0]) << 24 \
+ | ((SilcUInt32)(SilcUInt8)(cp)[1] << 16) \
+ | ((SilcUInt32)(SilcUInt8)(cp)[2] << 8) \
+ | ((SilcUInt32)(SilcUInt8)(cp)[3]); \
} while(0)
/***/
*/
#define SILC_GET64_MSB(l, cp) \
do { \
- (l) = ((((SilcUInt64)SILC_GET_WORD((cp))) << 32) | \
- ((SilcUInt64)SILC_GET_WORD((cp) + 4))); \
+ (l) = ((((SilcUInt64)SILC_GET_WORD((cp))) << 32) | \
+ ((SilcUInt64)SILC_GET_WORD((cp) + 4))); \
} while(0)
/***/
*
* SOURCE
*/
+#if defined(SILC_I486) && defined(__GNUC__)
+#define SILC_GET16_LSB(l, cp) (l) = (*(SilcUInt16 *)(cp))
+#else
#define SILC_GET16_LSB(l, cp) \
do { \
- (l) = ((SilcUInt32)(SilcUInt8)(cp)[0]) \
- | ((SilcUInt32)(SilcUInt8)(cp)[1] << 8); \
+ (l) = ((SilcUInt32)(SilcUInt8)(cp)[0]) \
+ | ((SilcUInt32)(SilcUInt8)(cp)[1] << 8); \
} while(0)
+#endif /* SILC_I486 && __GNUC__ */
/***/
/****d* silcutil/SILCTypes/SILC_GET32_LSB
*
* SOURCE
*/
+#if defined(SILC_I486) && defined(__GNUC__)
+#define SILC_GET32_LSB(l, cp) (l) = (*(SilcUInt32 *)(cp))
+#else
#define SILC_GET32_LSB(l, cp) \
do { \
- (l) = ((SilcUInt32)(SilcUInt8)(cp)[0]) \
- | ((SilcUInt32)(SilcUInt8)(cp)[1] << 8) \
- | ((SilcUInt32)(SilcUInt8)(cp)[2] << 16) \
- | ((SilcUInt32)(SilcUInt8)(cp)[3] << 24); \
+ (l) = ((SilcUInt32)(SilcUInt8)(cp)[0]) \
+ | ((SilcUInt32)(SilcUInt8)(cp)[1] << 8) \
+ | ((SilcUInt32)(SilcUInt8)(cp)[2] << 16) \
+ | ((SilcUInt32)(SilcUInt8)(cp)[3] << 24); \
} while(0)
+#endif /* SILC_I486 && __GNUC__ */
/* Same as above, but always XORs the result. Special purpose macro. */
+#if defined(SILC_I486) && defined(__GNUC__)
+#define SILC_GET32_X_LSB(l, cp) (l) ^= (*(SilcUInt32 *)(cp))
+#else
#define SILC_GET32_X_LSB(l, cp) \
- (l) ^= ((SilcUInt32)(SilcUInt8)(cp)[0]) \
- | ((SilcUInt32)(SilcUInt8)(cp)[1] << 8) \
- | ((SilcUInt32)(SilcUInt8)(cp)[2] << 16) \
- | ((SilcUInt32)(SilcUInt8)(cp)[3] << 24)
+ (l) ^= ((SilcUInt32)(SilcUInt8)(cp)[0]) \
+ | ((SilcUInt32)(SilcUInt8)(cp)[1] << 8) \
+ | ((SilcUInt32)(SilcUInt8)(cp)[2] << 16) \
+ | ((SilcUInt32)(SilcUInt8)(cp)[3] << 24)
+#endif /* SILC_I486 && __GNUC__ */
/***/
/****d* silcutil/SILCTypes/SILC_PUT16_MSB
*/
#define SILC_PUT16_MSB(l, cp) \
do { \
- (cp)[0] = (SilcUInt8)((l) >> 8); \
- (cp)[1] = (SilcUInt8)(l); \
+ (cp)[0] = (SilcUInt8)((l) >> 8); \
+ (cp)[1] = (SilcUInt8)(l); \
} while(0)
/***/
*/
#define SILC_PUT32_MSB(l, cp) \
do { \
- (cp)[0] = (SilcUInt8)((l) >> 24); \
- (cp)[1] = (SilcUInt8)((l) >> 16); \
- (cp)[2] = (SilcUInt8)((l) >> 8); \
- (cp)[3] = (SilcUInt8)(l); \
+ (cp)[0] = (SilcUInt8)((l) >> 24); \
+ (cp)[1] = (SilcUInt8)((l) >> 16); \
+ (cp)[2] = (SilcUInt8)((l) >> 8); \
+ (cp)[3] = (SilcUInt8)(l); \
} while(0)
/***/
*
* SOURCE
*/
+#if defined(SILC_I486) && defined(__GNUC__)
+#define SILC_PUT16_LSB(l, cp) (*(SilcUInt16 *)(cp)) = (l)
+#else
#define SILC_PUT16_LSB(l, cp) \
do { \
- (cp)[0] = (SilcUInt8)(l); \
- (cp)[1] = (SilcUInt8)((l) >> 8); \
+ (cp)[0] = (SilcUInt8)(l); \
+ (cp)[1] = (SilcUInt8)((l) >> 8); \
} while(0)
+#endif /* SILC_I486 && __GNUC__ */
/***/
/****d* silcutil/SILCTypes/SILC_PUT32_LSB
*
* SOURCE
*/
+#if defined(SILC_I486) && defined(__GNUC__)
+#define SILC_PUT32_LSB(l, cp) (*(SilcUInt32 *)(cp)) = (l)
+#else
#define SILC_PUT32_LSB(l, cp) \
do { \
- (cp)[0] = (SilcUInt8)(l); \
- (cp)[1] = (SilcUInt8)((l) >> 8); \
- (cp)[2] = (SilcUInt8)((l) >> 16); \
- (cp)[3] = (SilcUInt8)((l) >> 24); \
+ (cp)[0] = (SilcUInt8)(l); \
+ (cp)[1] = (SilcUInt8)((l) >> 8); \
+ (cp)[2] = (SilcUInt8)((l) >> 16); \
+ (cp)[3] = (SilcUInt8)((l) >> 24); \
} while(0)
+#endif /* SILC_I486 && __GNUC__ */
/***/
/****d* silcutil/SILCTypes/SILC_SWAB_16
silc_log_set_debug_string("*fsm*,*async*");
}
+{
+#define SSILC_GET32_MSB(l, cp) \
+do { \
+ (l) = ((SilcUInt32)(SilcUInt8)(cp)[3]) << 24 \
+ | ((SilcUInt32)(SilcUInt8)(cp)[2] << 16) \
+ | ((SilcUInt32)(SilcUInt8)(cp)[1] << 8) \
+ | ((SilcUInt32)(SilcUInt8)(cp)[0]); \
+} while(0)
+ unsigned char tmp[8], tmp2[8];
+ SilcUInt32 t1, t2, t3, t4;
+
+ tmp[0] = 0x11;
+ tmp[1] = 0x22;
+ tmp[2] = 0x33;
+ tmp[3] = 0x44;
+ tmp[4] = 0x55;
+ tmp[5] = 0x66;
+ tmp[6] = 0x77;
+ tmp[7] = 0x88;
+
+ SILC_LOG_HEXDUMP(("DATA"), tmp, 4);
+
+ SILC_GET32_LSB(t1, tmp);
+ SILC_LOG_DEBUG(("GET_LSB: %x", t1));
+
+ SSILC_GET32_MSB(t1, tmp);
+ SILC_LOG_DEBUG(("GET_MSB: %x", t1));
+
+ SILC_PUT32_LSB(t1, tmp2);
+ SILC_LOG_HEXDUMP(("PUT_LSB"), tmp2, 4);
+
+ SILC_PUT32_MSB(t1, tmp2);
+ SILC_LOG_HEXDUMP(("PUT_MSB"), tmp2, 4);
+
+ exit(1);
+}
+
SILC_LOG_DEBUG(("Allocating scheduler"));
schedule = silc_schedule_init(0, NULL);
} *SilcUnixScheduler;
typedef struct {
- SilcUInt32 signal;
+ SilcUInt32 sig;
SilcTaskCallback callback;
void *context;
SilcBool call;
fds = silc_realloc(internal->fds, sizeof(*internal->fds) *
(fds_count + (fds_count / 2)));
- if (!fds)
+ if (silc_unlikely(!fds))
break;
internal->fds = fds;
internal->fds_count = fds_count = fds_count + (fds_count / 2);
internal->app_context = app_context;
for (i = 0; i < SIGNAL_COUNT; i++) {
- signal_call[i].signal = 0;
+ signal_call[i].sig = 0;
signal_call[i].call = FALSE;
signal_call[i].schedule = schedule;
}
int i;
for (i = 0; i < SIGNAL_COUNT; i++) {
- if (signal_call[i].signal == signal) {
+ if (signal_call[i].sig == signal) {
signal_call[i].call = TRUE;
signal_call[i].schedule->signal_tasks = TRUE;
SILC_LOG_DEBUG(("Scheduling signal %d to be called",
- signal_call[i].signal));
+ signal_call[i].sig));
break;
}
}
if (!internal)
return;
- SILC_LOG_DEBUG(("Registering signal %d", signal));
+ SILC_LOG_DEBUG(("Registering signal %d", sig));
silc_schedule_internal_signals_block(schedule, context);
for (i = 0; i < SIGNAL_COUNT; i++) {
- if (!signal_call[i].signal) {
- signal_call[i].signal = sig;
+ if (!signal_call[i].sig) {
+ signal_call[i].sig = sig;
signal_call[i].callback = callback;
signal_call[i].context = callback_context;
signal_call[i].call = FALSE;
if (!internal)
return;
- SILC_LOG_DEBUG(("Unregistering signal %d", signal));
+ SILC_LOG_DEBUG(("Unregistering signal %d", sig));
silc_schedule_internal_signals_block(schedule, context);
for (i = 0; i < SIGNAL_COUNT; i++) {
- if (signal_call[i].signal == sig) {
- signal_call[i].signal = 0;
+ if (signal_call[i].sig == sig) {
+ signal_call[i].sig = 0;
signal_call[i].callback = NULL;
signal_call[i].context = NULL;
signal_call[i].call = FALSE;
if (signal_call[i].call &&
signal_call[i].callback) {
SILC_LOG_DEBUG(("Calling signal %d callback",
- signal_call[i].signal));
+ signal_call[i].sig));
signal_call[i].callback(schedule, internal->app_context,
SILC_TASK_INTERRUPT,
- signal_call[i].signal,
+ signal_call[i].sig,
signal_call[i].context);
signal_call[i].call = FALSE;
}