Added SILC Thread Pool API.
author Pekka Riikonen <priikone@silcnet.org>
Mon, 2 Jul 2007 14:20:42 +0000 (14:20 +0000)
committer Pekka Riikonen <priikone@silcnet.org>
Mon, 2 Jul 2007 14:20:42 +0000 (14:20 +0000)
CHANGES.RUNTIME [new file with mode: 0644]
TODO
lib/silcutil/Makefile.ad
lib/silcutil/silcthread.c [new file with mode: 0644]
lib/silcutil/silcthread.h
lib/silcutil/tests/Makefile.am
lib/silcutil/tests/test_silcthread.c [new file with mode: 0644]

diff --git a/CHANGES.RUNTIME b/CHANGES.RUNTIME
new file mode 100644 (file)
index 0000000..6f9443f
--- /dev/null
@@ -0,0 +1,8 @@
+Mon Jul  2 17:13:51 EEST 2007  Pekka Riikonen <priikone@silcnet.org>
+
+       * Added SILC Thread Pool API to lib/silcutil/silcthread.[ch],
+         and test program in lib/silcutil/tests/test_silcthread.c.
+
+-----------------------------------------------------------------------------
+
+For older changes please see the CHANGES file from the CVS tree.
diff --git a/TODO b/TODO
index 92ce8299bddb81c48a60e884da2ab1833e7e29d8..ec144295c15b7c689f1a7e6e09c5df532cf516f2 100644 (file)
--- a/TODO
+++ b/TODO
@@ -187,55 +187,7 @@ Runtime library, lib/silcutil/
 
  o SilcStack aware SilcDList.
 
- o Thread pool API.  Add this to lib/silcutil/silcthread.[ch].
-
-   typedef void (*SilcThreadPoolFunc)(SilcSchedule schedule,
-                                     void *context);
-
-   /* Allocate thread pool with at least `min_threads' and at most
-      `max_threads' many threads.  If `stack' is non-NULL all memory
-      is allocated from the `stack'.  If `start_min_threads' is TRUE
-      this will start `min_threads' many threads immediately. */
-   SilcThreadPool silc_thread_pool_alloc(SilcStack stack,
-                                        SilcUInt32 min_threads,
-                                        SilcUInt32 max_threads,
-                                        SilcBool start_min_threads);
-
-   /* Free thread pool.  If `wait_unfinished' is TRUE this will block
-      and waits that all remaining active threads finish before freeing
-      the pool. */
-   void silc_thread_pool_free(SilcThreadPool tp, SilcBool wait_unfinished);
-
-   /* Run `run' function with `run_context' in one of the threads in the
-      thread pool.  Returns FALSE if the thread pool is being freed.  If
-      there are no free threads left in the pool this will queue the
-      the `run' and will call it once a thread becomes free.
-
-      If `completion' is non-NULL it will be called to indicate completion
-      of the `run' function.  If `schedule' is non-NULL the `completion'
-      will be called through the scheduler in the main thread.  If it is
-      NULL the `completion' is called directly from the thread after the
-      `run' has returned. */
-   SilcBool silc_thread_pool_run(SilcThreadPool tp,
-                                SilcSchedule schedule,
-                                SilcThreadPoolFunc run,
-                                void *run_context,
-                                SilcThreadPoolFunc completion,
-                                void *completion_context);
-
-   /* Modify the amount of maximum threads of the pool. */
-   void silc_thread_pool_set_max_threads(SilcThreadPool tp,
-                                        SilcUInt32 max_threads);
-
-   /* Returns the amount of maximum size the pool can grow. */
-   SilcUInt32 silc_thread_pool_num_max_threads(SilcThreadPool tp);
-
-   /* Returns the amount of free threads in the pool currently. */
-   SilcUInt32 silc_thread_pool_num_free_threads(SilcThreadPool tp);
-
-   /* Stops all free and started threads.  The minumum amount of threads
-      specified to silc_thread_pool_alloc always remains. */
-   void silc_thread_pool_purge(SilcThreadPool tp);
+ o Thread pool API.  Add this to lib/silcutil/silcthread.[ch].         (***DONE)
 
  o Fast mutex implementation.  Fast rwlock implementation.  Mutex and
    rwlock implementation using atomic operations.
@@ -642,17 +594,17 @@ lib/silcserver
 
  o Library must have support for SERVICE command.
 
- o The server must be able to run behind a NAT device.  This means that 
+ o The server must be able to run behind a NAT device.  This means that
    Server ID must be based on public IP instead of private IP.
 
- o The following data must be in per-connection context: client id cache, 
-   server id cache, channel id cache, all statistics must be 
+ o The following data must be in per-connection context: client id cache,
+   server id cache, channel id cache, all statistics must be
    per-connection.
 
  o The following data must be in per-thread context: command context
    freelist/pool, pending commands, random number generator.
 
- o Do incoming packet processing in its own FSM thread in the 
+ o Do incoming packet processing in its own FSM thread in the
    server-threads FSM.  Same as in client library.
 
  o Reference count all Silc*Entry structures.
diff --git a/lib/silcutil/Makefile.ad b/lib/silcutil/Makefile.ad
index b685a75fa96d3d9b62a762d995f0c3be8a1675f4..d14a624d5b4aa7eb0c39ba173e25a720b43022c0 100644 (file)
@@ -68,7 +68,8 @@ libsilcutil_la_SOURCES = \
        silctime.c      \
        silcmime.c      \
        silcstack.c     \
-       silcsnprintf.c
+       silcsnprintf.c  \
+       silcthread.c
 
 #ifdef SILC_DIST_TOOLKIT
 include_HEADERS =      \
diff --git a/lib/silcutil/silcthread.c b/lib/silcutil/silcthread.c
new file mode 100644 (file)
index 0000000..0c6d642
--- /dev/null
@@ -0,0 +1,439 @@
+/*
+
+  silcthread.c
+
+  Author: Pekka Riikonen <priikone@silcnet.org>
+
+  Copyright (C) 2007 Pekka Riikonen
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; version 2 of the License.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+*/
+
+#include "silc.h"
+
+/************************** Types and definitions ***************************/
+
+/* Thread pool thread context */
+typedef struct SilcThreadPoolThreadStruct {
+  struct SilcThreadPoolThreadStruct *next;
+  struct SilcThreadPoolThreadStruct *next2;
+  SilcThreadPool tp;               /* The thread pool */
+  SilcSchedule schedule;           /* Scheduler, may be NULL */
+  SilcThreadPoolFunc run;          /* The function to run in a thread */
+  SilcThreadPoolFunc completion;    /* Completion function */
+  void *run_context;
+  void *completion_context;
+  unsigned int stop        : 1;            /* Set to stop the thread */
+} *SilcThreadPoolThread;
+
+/* Completion context */
+typedef struct SilcThreadPoolCompletionStruct {
+  SilcSchedule schedule;           /* Scheduler, may be NULL */
+  SilcThreadPoolFunc completion;    /* Completion function */
+  void *completion_context;
+} *SilcThreadPoolCompletion;
+
+/* Thread pool context */
+struct SilcThreadPoolStruct {
+  SilcStack stack;                 /* Stack for memory allocation */
+  SilcMutex lock;                  /* Pool lock */
+  SilcCond pool_signal;                    /* Condition variable for signalling */
+  SilcList threads;                /* Threads in the pool */
+  SilcList free_threads;           /* Threads freelist */
+  SilcList queue;                  /* Queue for waiting calls */
+  SilcUInt16 min_threads;          /* Minimum threads in the pool */
+  SilcUInt16 max_threads;          /* Maximum threads in the pool */
+  SilcUInt16 refcnt;               /* Reference counter */
+  unsigned int destroy       : 1;   /* Set when pool is to be destroyed */
+};
+
+/************************ Static utility functions **************************/
+
+/* Reference thread pool.  Must be called locked. */
+
+static void silc_thread_pool_ref(SilcThreadPool tp)
+{
+  tp->refcnt++;
+  SILC_LOG_DEBUG(("Thread pool %p, refcnt %d -> %d", tp, tp->refcnt - 1,
+                 tp->refcnt));
+}
+
+/* Unreference thread pool.  Must be called locked.  Releases the lock. */
+
+static void silc_thread_pool_unref(SilcThreadPool tp)
+{
+  tp->refcnt--;
+  SILC_LOG_DEBUG(("Thread pool %p refcnt %d -> %d", tp, tp->refcnt + 1,
+                 tp->refcnt));
+  if (!tp->refcnt) {
+    silc_mutex_unlock(tp->lock);
+    silc_mutex_free(tp->lock);
+    silc_cond_free(tp->pool_signal);
+    silc_free(tp);
+    return;
+  }
+  silc_mutex_unlock(tp->lock);
+}
+
+/* Thread completion callback */
+
+SILC_TASK_CALLBACK(silc_thread_pool_run_completion)
+{
+  SilcThreadPoolCompletion c = context;
+  c->completion(c->schedule, c->completion_context);
+  silc_free(c);
+}
+
+/* The thread executor.  Each thread in the pool is run here.  They wait
+   here for something to do which is given to them by silc_thread_pool_run. */
+
+static void *silc_thread_pool_run_thread(void *context)
+{
+  SilcThreadPoolThread t = context, q;
+  SilcThreadPool tp = t->tp;
+  SilcMutex lock = tp->lock;
+  SilcCond pool_signal = tp->pool_signal;
+
+  silc_mutex_lock(lock);
+
+  while (1) {
+    /* Wait here for code to execute */
+    while (!t->run && !t->stop)
+      silc_cond_wait(pool_signal, lock);
+
+    if (t->stop) {
+      /* Stop the thread.  Remove from threads list and free memory. */
+      SILC_LOG_DEBUG(("Stop thread %p", t));
+      silc_list_del(tp->threads, t);
+      silc_free(t);
+
+      /* If we are last thread, signal the waiting destructor. */
+      if (silc_list_count(tp->threads) == 0)
+       silc_cond_signal(pool_signal);
+
+      /* Release pool reference.  Releases lock also. */
+      silc_thread_pool_unref(tp);
+      break;
+    }
+    silc_mutex_unlock(lock);
+
+    /* Execute code */
+    SILC_LOG_DEBUG(("Execute call %p, context %p, thread %p", t->run,
+                   t->run_context, t));
+    t->run(t->schedule, t->run_context);
+
+    /* If scheduler is NULL, call completion directly from here.  Otherwise
+       it is called through the scheduler in the thread where the scheduler
+       is running. */
+    if (t->completion) {
+      if (t->schedule) {
+       SilcThreadPoolCompletion c = silc_calloc(1, sizeof(*c));
+       if (c) {
+         SILC_LOG_DEBUG(("Run completion through scheduler %p", t->schedule));
+         c->schedule = t->schedule;
+         c->completion = t->completion;
+         c->completion_context = t->completion_context;
+         silc_schedule_task_add_timeout(c->schedule,
+                                        silc_thread_pool_run_completion, c,
+                                        0, 0);
+         silc_schedule_wakeup(c->schedule);
+       } else {
+         t->completion(NULL, t->completion_context);
+       }
+      } else {
+       SILC_LOG_DEBUG(("Run completion directly"));
+       t->completion(NULL, t->completion_context);
+      }
+    }
+
+    silc_mutex_lock(lock);
+
+    /* Check if there are calls in queue */
+    if (silc_list_count(tp->queue) > 0) {
+      silc_list_start(tp->queue);
+      q = silc_list_get(tp->queue);
+
+      SILC_LOG_DEBUG(("Execute call from queue"));
+
+      /* Execute this call now */
+      t->run = q->run;
+      t->run_context = q->run_context;
+      t->completion = q->completion;
+      t->completion_context = q->completion_context;
+      t->schedule = q->schedule;
+
+      silc_list_del(tp->queue, q);
+      silc_free(q);
+      continue;
+    }
+
+    /* The thread is now free for use again. */
+    t->run = NULL;
+    t->completion = NULL;
+    t->schedule = NULL;
+    silc_list_add(tp->free_threads, t);
+  }
+
+  return NULL;
+}
+
+/* Creates a new thread for the thread pool */
+
+static SilcThreadPoolThread silc_thread_pool_new_thread(SilcThreadPool tp)
+{
+  SilcThreadPoolThread t;
+
+  t = silc_calloc(1, sizeof(*t));
+  if (!t)
+    return NULL;
+  t->tp = tp;
+  silc_list_add(tp->threads, t);
+  silc_list_add(tp->free_threads, t);
+  silc_thread_pool_ref(tp);
+
+  SILC_LOG_DEBUG(("Start thread %p", t));
+
+  /* Start the thread */
+  silc_thread_create(silc_thread_pool_run_thread, t, FALSE);
+
+  return t;
+}
+
+/**************************** Thread Pool API *******************************/
+
+/* Allocate thread pool */
+
+SilcThreadPool silc_thread_pool_alloc(SilcStack stack,
+                                     SilcUInt32 min_threads,
+                                     SilcUInt32 max_threads,
+                                     SilcBool start_min_threads)
+{
+  SilcThreadPool tp;
+  int i;
+
+  if (max_threads < min_threads)
+    return NULL;
+
+  tp = silc_calloc(1, sizeof(*tp));
+  if (!tp)
+    return NULL;
+
+  SILC_LOG_DEBUG(("Starting thread pool %p, min threads %d, max threads %d",
+                 tp, min_threads, max_threads));
+
+  tp->stack = stack;
+  tp->min_threads = min_threads;
+  tp->max_threads = max_threads;
+  tp->refcnt++;
+
+  if (!silc_mutex_alloc(&tp->lock)) {
+    silc_free(tp);
+    return NULL;
+  }
+
+  if (!silc_cond_alloc(&tp->pool_signal)) {
+    silc_mutex_free(tp->lock);
+    silc_free(tp);
+    return NULL;
+  }
+
+  silc_list_init(tp->threads, struct SilcThreadPoolThreadStruct, next);
+  silc_list_init(tp->free_threads, struct SilcThreadPoolThreadStruct, next2);
+  silc_list_init(tp->queue, struct SilcThreadPoolThreadStruct, next);
+
+  for (i = 0; i < tp->min_threads && start_min_threads; i++)
+    silc_thread_pool_new_thread(tp);
+
+  return tp;
+}
+
+/* Free thread pool */
+
+void silc_thread_pool_free(SilcThreadPool tp, SilcBool wait_unfinished)
+{
+  SilcThreadPoolThread t;
+
+  SILC_LOG_DEBUG(("Free thread pool %p", tp));
+
+  silc_mutex_lock(tp->lock);
+  tp->destroy = TRUE;
+
+  /* Stop threads */
+  silc_list_start(tp->threads);
+  while ((t = silc_list_get(tp->threads)))
+    t->stop = TRUE;
+  silc_cond_broadcast(tp->pool_signal);
+
+  if (wait_unfinished) {
+    SILC_LOG_DEBUG(("Wait threads to finish"));
+    while (silc_list_count(tp->threads))
+      silc_cond_wait(tp->pool_signal, tp->lock);
+  }
+
+  /* Free calls from queue */
+  silc_list_start(tp->queue);
+  while ((t = silc_list_get(tp->queue)))
+    silc_free(t);
+  silc_list_init(tp->queue, struct SilcThreadPoolThreadStruct, next);
+
+  /* Release reference.  Releases lock also. */
+  silc_thread_pool_unref(tp);
+}
+
+/* Execute code in a thread in the pool */
+
+SilcBool silc_thread_pool_run(SilcThreadPool tp,
+                             SilcBool queuable,
+                             SilcSchedule schedule,
+                             SilcThreadPoolFunc run,
+                             void *run_context,
+                             SilcThreadPoolFunc completion,
+                             void *completion_context)
+{
+  SilcThreadPoolThread t;
+
+  silc_mutex_lock(tp->lock);
+
+  if (tp->destroy) {
+    silc_mutex_unlock(tp->lock);
+    return FALSE;
+  }
+
+  /* Get free thread */
+  silc_list_start(tp->free_threads);
+  t = silc_list_get(tp->free_threads);
+  if (!t) {
+    if (silc_list_count(tp->threads) + 1 > tp->max_threads) {
+      /* Maximum threads reached */
+      if (!queuable) {
+       silc_mutex_unlock(tp->lock);
+       return FALSE;
+      }
+
+      SILC_LOG_DEBUG(("Queue call %p, context %p", run, run_context));
+
+      /* User wants to queue this call until thread becomes free */
+      t = silc_calloc(1, sizeof(*t));
+      if (!t) {
+       silc_mutex_unlock(tp->lock);
+       return FALSE;
+      }
+
+      t->run = run;
+      t->run_context = run_context;
+      t->completion = completion;
+      t->completion_context = completion_context;
+      t->schedule = schedule;
+
+      silc_list_add(tp->queue, t);
+      silc_mutex_unlock(tp->lock);
+      return TRUE;
+    } else {
+      /* Create new thread */
+      t = silc_thread_pool_new_thread(tp);
+      if (!t) {
+       silc_mutex_unlock(tp->lock);
+       return FALSE;
+      }
+    }
+  }
+
+  SILC_LOG_DEBUG(("Run call %p, context %p, thread %p", run, run_context, t));
+
+  /* Mark this call to be executed in this thread */
+  t->run = run;
+  t->run_context = run_context;
+  t->completion = completion;
+  t->completion_context = completion_context;
+  t->schedule = schedule;
+  silc_list_del(tp->free_threads, t);
+
+  /* Signal threads */
+  silc_cond_signal(tp->pool_signal);
+
+  silc_mutex_unlock(tp->lock);
+  return TRUE;
+}
+
+/* Set maximum threads in the pool */
+
+void silc_thread_pool_set_max_threads(SilcThreadPool tp,
+                                     SilcUInt32 max_threads)
+{
+  SILC_LOG_DEBUG(("Set thread pool %p max threads to %d", tp, max_threads));
+
+  silc_mutex_lock(tp->lock);
+  tp->max_threads = max_threads;
+  silc_mutex_unlock(tp->lock);
+}
+
+/* Get maximum threads in the pool */
+
+SilcUInt32 silc_thread_pool_num_max_threads(SilcThreadPool tp)
+{
+  SilcUInt32 max_threads;
+
+  silc_mutex_lock(tp->lock);
+  max_threads = tp->max_threads;
+  silc_mutex_unlock(tp->lock);
+
+  return max_threads;
+}
+
+/* Get number of free threads in the pool */
+
+SilcUInt32 silc_thread_pool_num_free_threads(SilcThreadPool tp)
+{
+  SilcUInt32 free_threads;
+
+  silc_mutex_lock(tp->lock);
+  free_threads = silc_list_count(tp->free_threads);
+  silc_mutex_unlock(tp->lock);
+
+  return free_threads;
+}
+
+/* Purge pool */
+
+void silc_thread_pool_purge(SilcThreadPool tp)
+{
+  SilcThreadPoolThread t;
+  int i;
+
+  silc_mutex_lock(tp->lock);
+
+  if (silc_list_count(tp->free_threads) <= tp->min_threads) {
+    silc_mutex_unlock(tp->lock);
+    return;
+  }
+
+  i = silc_list_count(tp->free_threads) - tp->min_threads;
+
+  SILC_LOG_DEBUG(("Purge %d threads", i));
+
+  silc_list_start(tp->threads);
+  while ((t = silc_list_get(tp->threads))) {
+    if (t->run)
+      continue;
+
+    t->stop = TRUE;
+    silc_list_del(tp->free_threads, t);
+
+    i--;
+    if (!i)
+      break;
+  }
+
+  /* Signal threads to stop */
+  silc_cond_broadcast(tp->pool_signal);
+
+  silc_mutex_unlock(tp->lock);
+}
diff --git a/lib/silcutil/silcthread.h b/lib/silcutil/silcthread.h
index 3a4036f6459ee1c5a3d8a0742fe6fd3f8bf80ab1..2a7888049341147d3bfdb85a7849cf35efcf8665 100644 (file)
@@ -4,7 +4,7 @@
 
   Author: Pekka Riikonen <priikone@silcnet.org>
 
-  Copyright (C) 2001 - 2005 Pekka Riikonen
+  Copyright (C) 2001 - 2007 Pekka Riikonen
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
  *
  * DESCRIPTION
  *
- * Interface for SILC Thread implementation. This is platform independent
- * interface of threads for applications that need concurrent execution
- * with the application's main thread. The threads created with this
- * interface executes concurrently with the calling thread.
+ * Interface for platform independent thread implementation and thread pool
+ * system.  The interface provides routines for applications that need
+ * concurrent execution with the application's main thread.  The threads
+ * created with this interface execute concurrently with the calling thread.
+ *
+ * The thread pool system can be used to start many threads and execute code
+ * in the threads.  The thread pool manages thread creation and
+ * destruction.
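+ *
+ * A minimal usage sketch (`my_func' is a hypothetical application
+ * callback; see the function descriptions below):
+ *
+ *   pool = silc_thread_pool_alloc(NULL, 0, 4, FALSE);
+ *   silc_thread_pool_run(pool, TRUE, NULL, my_func, context, NULL, NULL);
+ *   silc_thread_pool_free(pool, TRUE);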
  *
  ***/
 
 #ifndef SILCTHREAD_H
 #define SILCTHREAD_H
 
+#include "silcschedule.h"
+
 /* Prototypes */
 
 /****s* silcutil/SilcThreadAPI/SilcThread
@@ -162,4 +168,184 @@ SilcBool silc_thread_wait(SilcThread thread, void **exit_value);
  ***/
 void silc_thread_yield(void);
 
+/****s* silcutil/SilcThreadAPI/SilcThreadPool
+ *
+ * NAME
+ *
+ *    typedef struct SilcThreadPoolStruct *SilcThreadPool;
+ *
+ * DESCRIPTION
+ *
+ *    This context is the actual SILC Thread Pool and is returned by
+ *    the silc_thread_pool_alloc function.  It is given as an argument
+ *    to some of the silc_thread_pool_* functions.  This context and its
+ *    resources are freed by calling silc_thread_pool_free.
+ *
+ ***/
+typedef struct SilcThreadPoolStruct *SilcThreadPool;
+
+/****f* silcutil/SilcThreadAPI/SilcThreadPoolFunc
+ *
+ * SYNOPSIS
+ *
+ *    typedef void (*SilcThreadPoolFunc)(SilcSchedule schedule,
+ *                                       void *context);
+ *
+ * DESCRIPTION
+ *
+ *    A callback function of this type is given as an argument to
+ *    silc_thread_pool_run.  The `schedule' is the scheduler and the
+ *    `context' is the `run_context' or `completion_context' given as
+ *    an argument to silc_thread_pool_run.
+ *
+ ***/
+typedef void (*SilcThreadPoolFunc)(SilcSchedule schedule, void *context);
+
+/****f* silcutil/SilcThreadAPI/silc_thread_pool_alloc
+ *
+ * SYNOPSIS
+ *
+ *    SilcThreadPool silc_thread_pool_alloc(SilcStack stack,
+ *                                          SilcUInt32 min_threads,
+ *                                          SilcUInt32 max_threads,
+ *                                          SilcBool start_min_threads);
+ *
+ * DESCRIPTION
+ *
+ *    Allocate thread pool with at least `min_threads' and at most
+ *    `max_threads' many threads.  If `stack' is non-NULL all memory is
+ *    allocated from the `stack'.  If `start_min_threads' is TRUE this will
+ *    start `min_threads' many threads immediately.  Returns the thread
+ *    pool context or NULL on error.
+ *
+ * EXAMPLE
+ *
+ *    // Start thread pool, by default it has 0 threads.
+ *    pool = silc_thread_pool_alloc(NULL, 0, 5, FALSE);
+ *
+ *    // Function to execute in a thread
+ *    void my_func(SilcSchedule schedule, void *context)
+ *    {
+ *      MyContext mycontext = context;
+ *      ...
+ *    }
+ *
+ *    // Execute code in a thread in the pool
+ *    silc_thread_pool_run(pool, TRUE, NULL, my_func, my_context, NULL, NULL);
+ *
+ ***/
+SilcThreadPool silc_thread_pool_alloc(SilcStack stack,
+                                     SilcUInt32 min_threads,
+                                     SilcUInt32 max_threads,
+                                     SilcBool start_min_threads);
+
+/****f* silcutil/SilcThreadAPI/silc_thread_pool_free
+ *
+ * SYNOPSIS
+ *
+ *    void silc_thread_pool_free(SilcThreadPool tp, SilcBool wait_unfinished);
+ *
+ * DESCRIPTION
+ *
+ *    Free the thread pool.  If `wait_unfinished' is TRUE this will block
+ *    until all remaining active threads have finished before freeing
+ *    the pool.
+ *
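+ * EXAMPLE
+ *
+ *    // A sketch; `pool' is a hypothetical pool context.  Block until
+ *    // the remaining active threads finish, then free the pool.
+ *    silc_thread_pool_free(pool, TRUE);
+ *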
+ ***/
+void silc_thread_pool_free(SilcThreadPool tp, SilcBool wait_unfinished);
+
+/****f* silcutil/SilcThreadAPI/silc_thread_pool_run
+ *
+ * SYNOPSIS
+ *
+ *    SilcBool silc_thread_pool_run(SilcThreadPool tp,
+ *                                  SilcBool queueable,
+ *                                  SilcSchedule schedule,
+ *                                  SilcBool queuable,
+ *                                  void *run_context,
+ *                                  SilcThreadPoolFunc completion,
+ *                                  void *completion_context);
+ *
+ * DESCRIPTION
+ *
+ *    Run the `run' function with `run_context' in one of the threads in the
+ *    thread pool.  Returns FALSE if the thread pool is being freed.  If
+ *    there are no free threads left in the pool and `queuable' is TRUE,
+ *    this will queue the `run' and call it once a thread becomes free.  If
+ *    `queuable' is FALSE and there are no free threads, this returns FALSE
+ *    and `run' is not executed.
+ *
+ *    If `completion' is non-NULL it will be called to indicate completion
+ *    of the `run' function.  If `schedule' is non-NULL the `completion'
+ *    will be called through the scheduler in the main thread.  If it is
+ *    NULL the `completion' is called directly from the thread after the
+ *    `run' has returned.
+ *
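+ * EXAMPLE
+ *
+ *    // A sketch; `pool', my_func and my_completion are hypothetical.
+ *    // Because `schedule' is non-NULL, the completion is called through
+ *    // the scheduler in the main thread after my_func returns.
+ *    silc_thread_pool_run(pool, TRUE, schedule, my_func, my_context,
+ *                         my_completion, my_context);
+ *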
+ ***/
+SilcBool silc_thread_pool_run(SilcThreadPool tp,
+                             SilcBool queuable,
+                             SilcSchedule schedule,
+                             SilcThreadPoolFunc run,
+                             void *run_context,
+                             SilcThreadPoolFunc completion,
+                             void *completion_context);
+
+/****f* silcutil/SilcThreadAPI/silc_thread_pool_set_max_threads
+ *
+ * SYNOPSIS
+ *
+ *    void silc_thread_pool_set_max_threads(SilcThreadPool tp,
+ *                                          SilcUInt32 max_threads);
+ *
+ * DESCRIPTION
+ *
+ *    Modify the maximum number of threads in the pool.  This call does not
+ *    affect any currently active or running thread.
+ *
+ ***/
+void silc_thread_pool_set_max_threads(SilcThreadPool tp,
+                                     SilcUInt32 max_threads);
+
+/****f* silcutil/SilcThreadAPI/silc_thread_pool_num_max_threads
+ *
+ * SYNOPSIS
+ *
+ *    SilcUInt32 silc_thread_pool_num_max_threads(SilcThreadPool tp);
+ *
+ * DESCRIPTION
+ *
+ *    Returns the maximum number of threads to which the pool can grow.
+ *
+ ***/
+SilcUInt32 silc_thread_pool_num_max_threads(SilcThreadPool tp);
+
+/****f* silcutil/SilcThreadAPI/silc_thread_pool_num_free_threads
+ *
+ * SYNOPSIS
+ *
+ *    SilcUInt32 silc_thread_pool_num_free_threads(SilcThreadPool tp);
+ *
+ * DESCRIPTION
+ *
+ *    Returns the current number of free threads in the pool.  Free threads
+ *    are threads that are not currently executing any code.
+ *
+ ***/
+SilcUInt32 silc_thread_pool_num_free_threads(SilcThreadPool tp);
+
+/****f* silcutil/SilcThreadAPI/silc_thread_pool_purge
+ *
+ * SYNOPSIS
+ *
+ *    void silc_thread_pool_purge(SilcThreadPool tp);
+ *
+ * DESCRIPTION
+ *
+ *    Stops all free and started threads.  The minimum number of threads
+ *    specified to silc_thread_pool_alloc always remains.  Any thread that
+ *    is currently executing code is not affected by this call.
+ *
+ ***/
+void silc_thread_pool_purge(SilcThreadPool tp);
+
 #endif
diff --git a/lib/silcutil/tests/Makefile.am b/lib/silcutil/tests/Makefile.am
index f08893d7250f48d83332f6ade8b81ce11f9fe098..4b5513a9342caba6390402533d2dc94941b23bba 100644 (file)
@@ -3,7 +3,7 @@
 #
 #  Author: Pekka Riikonen <priikone@silcnet.org>
 #
-#  Copyright (C) 2004 - 2005 Pekka Riikonen
+#  Copyright (C) 2004 - 2007 Pekka Riikonen
 #
 #  This program is free software; you can redistribute it and/or modify
 #  it under the terms of the GNU General Public License as published by
@@ -20,7 +20,7 @@ AUTOMAKE_OPTIONS = 1.0 no-dependencies foreign
 bin_PROGRAMS =         test_silcstrutil test_silcstringprep test_silchashtable \
        test_silclist test_silcfsm test_silcasync test_silcschedule \
        test_silcnet test_silcstack test_silcmime test_silcfdstream \
-       test_silcatomic test_silcmutex test_silctime
+       test_silcatomic test_silcmutex test_silctime test_silcthread
 
 test_silcstrutil_SOURCES = test_silcstrutil.c
 test_silcstringprep_SOURCES = test_silcstringprep.c
@@ -36,6 +36,7 @@ test_silcfdstream_SOURCES = test_silcfdstream.c
 test_silcatomic_SOURCES = test_silcatomic.c
 test_silcmutex_SOURCES = test_silcmutex.c
 test_silctime_SOURCES = test_silctime.c
+test_silcthread_SOURCES = test_silcthread.c
 
 LIBS = $(SILC_COMMON_LIBS)
 LDADD = -L.. -L../.. -lsilc
diff --git a/lib/silcutil/tests/test_silcthread.c b/lib/silcutil/tests/test_silcthread.c
new file mode 100644 (file)
index 0000000..920cb6f
--- /dev/null
@@ -0,0 +1,103 @@
+/* SilcThreadPool tests */
+
+#include "silc.h"
+
+SilcSchedule schedule;
+
+static void func(SilcSchedule schedule, void *context)
+{
+  SILC_LOG_DEBUG(("func: %d", (int)context));
+  sleep(1);
+}
+
+static void compl(SilcSchedule schedule, void *context)
+{
+  SILC_LOG_DEBUG(("completion: %d", (int)context));
+  if ((int)context == 0xff)
+    silc_schedule_stop(schedule);
+}
+
+int main(int argc, char **argv)
+{
+  SilcBool success = FALSE;
+  SilcThreadPool tp;
+  int i;
+
+  if (argc > 1 && !strcmp(argv[1], "-d")) {
+    silc_log_debug(TRUE);
+    silc_log_quick(TRUE);
+    silc_log_debug_hexdump(TRUE);
+    silc_log_set_debug_string("*thread*");
+  }
+
+  schedule = silc_schedule_init(0, NULL);
+  if (!schedule)
+    goto err;
+
+  SILC_LOG_DEBUG(("Allocate thread pool"));
+  tp = silc_thread_pool_alloc(NULL, 2, 4, TRUE);
+  if (!tp)
+    goto err;
+  SILC_LOG_DEBUG(("Stop thread pool"));
+  silc_thread_pool_free(tp, TRUE);
+
+
+  SILC_LOG_DEBUG(("Allocate thread pool"));
+  tp = silc_thread_pool_alloc(NULL, 0, 2, TRUE);
+  if (!tp)
+    goto err;
+  for (i = 0; i < 4; i++) {
+    SILC_LOG_DEBUG(("Run thread %d", i + 1));
+    if (!silc_thread_pool_run(tp, TRUE, NULL, func, (void *)(i + 1),
+                             compl, (void *)(i + 1)))
+      goto err;
+  }
+  sleep(3);
+  SILC_LOG_DEBUG(("Stop thread pool"));
+  silc_thread_pool_free(tp, TRUE);
+
+  SILC_LOG_DEBUG(("Allocate thread pool"));
+  tp = silc_thread_pool_alloc(NULL, 0, 2, TRUE);
+  if (!tp)
+    goto err;
+  for (i = 0; i < 2; i++) {
+    SILC_LOG_DEBUG(("Run thread %d", i + 1));
+    if (!silc_thread_pool_run(tp, FALSE, NULL, func, (void *)(i + 1),
+                             compl, (void *)(i + 1)))
+      goto err;
+  }
+  if (silc_thread_pool_run(tp, FALSE, NULL, func, (void *)3,
+                          compl, (void *)3))
+    goto err;
+  sleep(3);
+  SILC_LOG_DEBUG(("Stop thread pool"));
+  silc_thread_pool_free(tp, TRUE);
+
+  SILC_LOG_DEBUG(("Allocate thread pool"));
+  tp = silc_thread_pool_alloc(NULL, 8, 16, FALSE);
+  if (!tp)
+    goto err;
+  for (i = 0; i < 8; i++) {
+    SILC_LOG_DEBUG(("Run thread %d", i + 1));
+    if (!silc_thread_pool_run(tp, FALSE, schedule, func, (void *)(i + 1),
+                             compl, (void *)(i + 1)))
+      goto err;
+  }
+  if (!silc_thread_pool_run(tp, FALSE, schedule, func, (void *)0xff,
+                           compl, (void *)0xff))
+    goto err;
+
+  silc_schedule(schedule);
+
+  SILC_LOG_DEBUG(("Stop thread pool"));
+  silc_thread_pool_free(tp, TRUE);
+
+  silc_schedule_uninit(schedule);
+  success = TRUE;
+
+ err:
+  SILC_LOG_DEBUG(("Testing was %s", success ? "SUCCESS" : "FAILURE"));
+  fprintf(stderr, "Testing was %s\n", success ? "SUCCESS" : "FAILURE");
+
+  return success;
+}