Added sort-of Quality of Service (QoS) support to the
authorPekka Riikonen <priikone@silcnet.org>
Sun, 20 Oct 2002 20:09:58 +0000 (20:09 +0000)
committerPekka Riikonen <priikone@silcnet.org>
Sun, 20 Oct 2002 20:09:58 +0000 (20:09 +0000)
SilcSocketConnection and added support for configuring it to
SILC Server.

13 files changed:
CHANGES
TODO
TODO-1.0
apps/silcd/server.c
apps/silcd/serverconfig.c
apps/silcd/serverconfig.h
doc/example_silcd.conf.in
lib/silcutil/silcschedule.c
lib/silcutil/silcsockconn.c
lib/silcutil/silcsockconn.h
lib/silcutil/silcutil.c
lib/silcutil/silcutil.h
lib/silcutil/unix/silcunixsockconn.c

diff --git a/CHANGES b/CHANGES
index bb2f48f882925b908ceab3c1ed5f17f145c8dfce..d59c0f0488eae0bea44855f2597e6b0ab2fa00a2 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -6,6 +6,19 @@ Sun Oct 20 19:54:55 EEST 2002  Pekka Riikonen <priikone@silcnet.org>
          should be FALSE.  Affected file lib/silcutil/silcschedule.[ch],
          lib/silcclient/client_internal.h and silcd/server_internal.h.
 
+       * Added silc_compare_timeval to determine which time value
+         is smaller.  Affected file lib/silcutil/silcutil.[ch].
+
+       * Added sort-of "Quality of Service" (QoS) support to
+         SilcSocketConnection.  Data reception can be controlled with
+         rate limit per second and/or read data length limit.
+         Added silc_socket_set_qos.  Affected files are
+         lib/silcutil/silcsockconn.[ch] and unix/silcunixsockconn.c.
+
+       * Added the QoS configuration to SILC Server configuration
+         file.  Affected files are silcd/serverconfig.[ch] and
+         server.c.
+
 Sun Oct 20 14:12:24 CEST 2002  Pekka Riikonen <priikone@silcnet.org>
 
        * Merged with irssi.org CVS.
diff --git a/TODO b/TODO
index b9dac74501c8e31ca776184cd2943d0cd181cb85..eb380fad03f905c5998678592a8a1dbaf249025a 100644 (file)
--- a/TODO
+++ b/TODO
@@ -26,15 +26,6 @@ TODO/bugs In SILC Server
        o Add special handling in router and server for "connection
          timed out" error.  Be optimistic.
 
-       o Testing
-
- o Add a timeout to handling incoming JOIN commands.  It should be 
-   enforced that JOIN command is executed only once in a second or two
-   seconds.  Now it is possible to accept n incoming JOIN commands
-   and process them without any timeouts.  THis must be employed because
-   each JOIN command will create and distribute the new channel key
-   to everybody on the channel (Fix this to 0.9.x).
-
  o Manual file for silcd(8) and silcd.conf.
 
  o Testing
index 7e37bd927ba491c73b342d530e4d70234e58dff3..1f821fdec6d46fc5714ef2984521c080f15762c1 100644 (file)
--- a/TODO-1.0
+++ b/TODO-1.0
@@ -144,6 +144,13 @@ least could be done.
    SILC currently supports SOCKS4 and SOCKS5 but it needs to be compiled
    in separately.
 
+ o Add a timeout to handling incoming JOIN commands.  It should be 
+   enforced that JOIN command is executed only once in a second or two
+   seconds.  Now it is possible to accept n incoming JOIN commands
+   and process them without any timeouts.  This must be employed because
+   each JOIN command will create and distribute the new channel key
+   to everybody on the channel (Fix this to 0.9.x).
+
  o EPOC specific additions/changes required:
 
        o lib/silcutil/epoc routines missing or not completed.
index 7ee66b1fe8388b342a2b893db9ed7924b21bacfb..d79911578966156f0ebd8ce825dd9314a5a5e1d3 100644 (file)
@@ -1376,8 +1376,7 @@ SILC_TASK_CALLBACK(silc_server_connect_to_router_final)
   if (conn && conn->param)
     param = conn->param;
 
-  /* Perform keepalive. The `hb_context' will be freed automatically
-     when finally calling the silc_socket_free function. */
+  /* Perform keepalive. */
   silc_socket_set_heartbeat(sock, param->keepalive_secs, server,
                            silc_server_perform_heartbeat,
                            server->schedule);
@@ -1775,7 +1774,7 @@ SILC_TASK_CALLBACK(silc_server_accept_new_connection_final)
   SilcSocketConnection sock = ctx->sock;
   SilcUnknownEntry entry = (SilcUnknownEntry)sock->user_data;
   void *id_entry;
-  SilcUInt32 hearbeat_timeout = server->config->param.keepalive_secs;
+  SilcServerConfigConnParams *param = &server->config->param;
 
   if (protocol->state == SILC_PROTOCOL_STATE_ERROR ||
       protocol->state == SILC_PROTOCOL_STATE_FAILURE) {
@@ -1872,11 +1871,21 @@ SILC_TASK_CALLBACK(silc_server_accept_new_connection_final)
 
       /* Get connection parameters */
       if (conn->param) {
-       if (conn->param->keepalive_secs)
-         hearbeat_timeout = conn->param->keepalive_secs;
+       param = conn->param;
+
+       if (!param->keepalive_secs)
+         param->keepalive_secs = server->config->param.keepalive_secs;
+
+       if (!param->qos && server->config->param.qos) {
+         param->qos = server->config->param.qos;
+         param->qos_rate_limit = server->config->param.qos_rate_limit;
+         param->qos_bytes_limit = server->config->param.qos_bytes_limit;
+         param->qos_limit_sec = server->config->param.qos_limit_sec;
+         param->qos_limit_usec = server->config->param.qos_limit_usec;
+       }
 
        /* Check if to be anonymous connection */
-       if (conn->param->anonymous)
+       if (param->anonymous)
          client->mode |= SILC_UMODE_ANONYMOUS;
       }
 
@@ -1925,8 +1934,18 @@ SILC_TASK_CALLBACK(silc_server_accept_new_connection_final)
 
        if (rconn) {
          if (rconn->param) {
-           if (rconn->param->keepalive_secs)
-             hearbeat_timeout = rconn->param->keepalive_secs;
+           param = rconn->param;
+
+           if (!param->keepalive_secs)
+             param->keepalive_secs = server->config->param.keepalive_secs;
+
+           if (!param->qos && server->config->param.qos) {
+             param->qos = server->config->param.qos;
+             param->qos_rate_limit = server->config->param.qos_rate_limit;
+             param->qos_bytes_limit = server->config->param.qos_bytes_limit;
+             param->qos_limit_sec = server->config->param.qos_limit_sec;
+             param->qos_limit_usec = server->config->param.qos_limit_usec;
+           }
          }
 
          initiator = rconn->initiator;
@@ -1949,8 +1968,18 @@ SILC_TASK_CALLBACK(silc_server_accept_new_connection_final)
        }
        if (sconn) {
          if (sconn->param) {
-           if (sconn->param->keepalive_secs)
-             hearbeat_timeout = sconn->param->keepalive_secs;
+           param = sconn->param;
+
+           if (!param->keepalive_secs)
+             param->keepalive_secs = server->config->param.keepalive_secs;
+
+           if (!param->qos && server->config->param.qos) {
+             param->qos = server->config->param.qos;
+             param->qos_rate_limit = server->config->param.qos_rate_limit;
+             param->qos_bytes_limit = server->config->param.qos_bytes_limit;
+             param->qos_limit_sec = server->config->param.qos_limit_sec;
+             param->qos_limit_usec = server->config->param.qos_limit_usec;
+           }
          }
 
          backup_router = sconn->backup_router;
@@ -2083,11 +2112,17 @@ SILC_TASK_CALLBACK(silc_server_accept_new_connection_final)
   /* Connection has been fully established now. Everything is ok. */
   SILC_LOG_DEBUG(("New connection authenticated"));
 
-  /* Perform keepalive. The `hb_context' will be freed automatically
-     when finally calling the silc_socket_free function. */
-  silc_socket_set_heartbeat(sock, hearbeat_timeout, server,
-                           silc_server_perform_heartbeat,
-                           server->schedule);
+  /* Perform keepalive. */
+  if (param->keepalive_secs)
+    silc_socket_set_heartbeat(sock, param->keepalive_secs, server,
+                             silc_server_perform_heartbeat,
+                             server->schedule);
+
+  /* Perform Quality of Service */
+  if (param->qos)
+    silc_socket_set_qos(sock, param->qos_rate_limit, param->qos_bytes_limit,
+                       param->qos_limit_sec, param->qos_limit_usec,
+                       server->schedule);
 
  out:
   silc_schedule_task_del_by_callback(server->schedule,
index 94f1e7dd0c626761e433515351502784dc48137c..deecf1aac5af95c5a3da35252eb732d4f615dab1 100644 (file)
@@ -231,6 +231,21 @@ SILC_CONFIG_CALLBACK(fetch_generic)
   else if (!strcmp(name, "detach_timeout")) {
     config->detach_timeout = (SilcUInt32) *(int *)val;
   }
+  else if (!strcmp(name, "qos")) {
+    config->param.qos = *(bool *)val;
+  }
+  else if (!strcmp(name, "qos_rate_limit")) {
+    config->param.qos_rate_limit = *(SilcUInt32 *)val;
+  }
+  else if (!strcmp(name, "qos_bytes_limit")) {
+    config->param.qos_bytes_limit = *(SilcUInt32 *)val;
+  }
+  else if (!strcmp(name, "qos_limit_sec")) {
+    config->param.qos_limit_sec = *(SilcUInt32 *)val;
+  }
+  else if (!strcmp(name, "qos_limit_usec")) {
+    config->param.qos_limit_usec = *(SilcUInt32 *)val;
+  }
   else
     return SILC_CONFIG_EINTERNAL;
 
@@ -668,6 +683,21 @@ SILC_CONFIG_CALLBACK(fetch_connparam)
   else if (!strcmp(name, "anonymous")) {
     tmp->anonymous = *(bool *)val;
   }
+  else if (!strcmp(name, "qos")) {
+    tmp->qos = *(bool *)val;
+  }
+  else if (!strcmp(name, "qos_rate_limit")) {
+    tmp->qos_rate_limit = *(SilcUInt32 *)val;
+  }
+  else if (!strcmp(name, "qos_bytes_limit")) {
+    tmp->qos_bytes_limit = *(SilcUInt32 *)val;
+  }
+  else if (!strcmp(name, "qos_limit_sec")) {
+    tmp->qos_limit_sec = *(SilcUInt32 *)val;
+  }
+  else if (!strcmp(name, "qos_limit_usec")) {
+    tmp->qos_limit_usec = *(SilcUInt32 *)val;
+  }
   else
     return SILC_CONFIG_EINTERNAL;
 
@@ -1019,6 +1049,11 @@ static const SilcConfigTable table_general[] = {
   { "version_software_vendor", SILC_CONFIG_ARG_STR,    fetch_generic,  NULL },
   { "detach_disabled",         SILC_CONFIG_ARG_TOGGLE, fetch_generic,  NULL },
   { "detach_timeout",          SILC_CONFIG_ARG_INT,    fetch_generic,  NULL },
+  { "qos",                     SILC_CONFIG_ARG_TOGGLE, fetch_generic,  NULL },
+  { "qos_rate_limit",          SILC_CONFIG_ARG_INT,    fetch_generic,  NULL },
+  { "qos_bytes_limit",         SILC_CONFIG_ARG_INT,    fetch_generic,  NULL },
+  { "qos_limit_sec",           SILC_CONFIG_ARG_INT,    fetch_generic,  NULL },
+  { "qos_limit_usec",          SILC_CONFIG_ARG_INT,    fetch_generic,  NULL },
   { 0, 0, 0, 0 }
 };
 
@@ -1107,6 +1142,11 @@ static const SilcConfigTable table_connparam[] = {
   { "version_software",               SILC_CONFIG_ARG_STR,    fetch_connparam, NULL },
   { "version_software_vendor", SILC_CONFIG_ARG_STR,    fetch_connparam,        NULL },
   { "anonymous",               SILC_CONFIG_ARG_TOGGLE, fetch_connparam,        NULL },
+  { "qos",                     SILC_CONFIG_ARG_TOGGLE, fetch_connparam,        NULL },
+  { "qos_rate_limit",          SILC_CONFIG_ARG_INT,    fetch_connparam,        NULL },
+  { "qos_bytes_limit",         SILC_CONFIG_ARG_INT,    fetch_connparam,        NULL },
+  { "qos_limit_sec",           SILC_CONFIG_ARG_INT,    fetch_connparam,        NULL },
+  { "qos_limit_usec",          SILC_CONFIG_ARG_INT,    fetch_connparam,        NULL },
   { 0, 0, 0, 0 }
 };
 
index ef9ce8b75e97378e1d16ac345a141cb7f3c755fa..164647d8262664e3fc4830a6c14f5936c102a327 100644 (file)
@@ -78,6 +78,7 @@ typedef struct SilcServerConfigLoggingStruct {
 
 /* Connection parameters */
 typedef struct SilcServerConfigConnParams {
+  struct SilcServerConfigConnParams *next;
   char *name;
   char *version_protocol;
   char *version_software;
@@ -89,10 +90,14 @@ typedef struct SilcServerConfigConnParams {
   SilcUInt32 reconnect_interval;
   SilcUInt32 reconnect_interval_max;
   SilcUInt32 key_exchange_rekey;
-  bool key_exchange_pfs;
-  bool reconnect_keep_trying;
-  bool anonymous;
-  struct SilcServerConfigConnParams *next;
+  SilcUInt32 qos_rate_limit;
+  SilcUInt32 qos_bytes_limit;
+  SilcUInt32 qos_limit_sec;
+  SilcUInt32 qos_limit_usec;
+  unsigned int key_exchange_pfs      : 1;
+  unsigned int reconnect_keep_trying : 1;
+  unsigned int anonymous             : 1;
+  unsigned int qos                   : 1;
 } SilcServerConfigConnParams;
 
 /* Holds all client authentication data from config file */
index aa7df35a65bb25257793034cd8e6652aae0296bd..3ddf7614876e5e4d43960cdf8de6633dd7f0d78c 100644 (file)
@@ -138,6 +138,23 @@ General {
        # sessions are persistent as long as server is running.
        #detach_disabled = true;
        #detach_timeout = 1440;
+
+       # Quality of Service (QoS) settings.  The QoS can be used to handle
+       # the incoming data and limit its handling rate to avoid flooding.
+       # By default QoS is disabled and can be enabled by setting "qos" to
+       # true value.  The "qos_rate_limit" is the incoming data reading
+       # per second, and if more frequently than the set limit is read the
+       # QoS is applied to the data.  The "qos_bytes_limit" is maximum bytes
+       # allowed for incoming data.  If more is received at once the QoS
+       # is applied to the data.  The "qos_limit_sec" and "qos_limit_usec"
+       # is the timeout used to delay the data handling, seconds and
+       # microseconds, respectively.  This can be overridden with
+       # ConnectionParams.
+       #qos = true;
+       #qos_rate_limit = 5;
+       #qos_bytes_limit = 1024;
+       #qos_limit_sec = 0;
+       #qos_limit_usec = 500000;
 };
 
 #
@@ -358,8 +375,22 @@ ConnectionParams {
        # is scrambled and anonymous mode is set for the user.
        #anonymous = true;
 
-       #TODO:
-       #connections_interval - incoming connection interval limit ?
+       # Quality of Service (QoS) settings.  The QoS can be used to handle
+       # the incoming data and limit its handling rate to avoid flooding.
+       # By default QoS is disabled and can be enabled by setting "qos" to
+       # true value.  The "qos_rate_limit" is the incoming data reading
+       # per second, and if more frequently than the set limit is read the
+       # QoS is applied to the data.  The "qos_bytes_limit" is maximum bytes
+       # allowed for incoming data.  If more is received at once the QoS
+       # is applied to the data.  The "qos_limit_sec" and "qos_limit_usec"
+       # is the timeout used to delay the data handling, seconds and
+       # microseconds, respectively.  For server connections QoS SHOULD NOT
+       # be set.
+       #qos = true;
+       #qos_rate_limit = 5;
+       #qos_bytes_limit = 1024;
+       #qos_limit_sec = 0;
+       #qos_limit_usec = 500000;
 };
 
 #
index 97005ca1911e1c75c96b5cd6a209fda56fbfc73f..515d3a9fc8134e92567fb407957f1c0c3cd30510 100644 (file)
@@ -81,8 +81,6 @@ static SilcTask silc_task_get_first(SilcTaskQueue queue, SilcTask first);
 static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
                                      SilcTaskPriority priority);
 static int silc_schedule_task_remove(SilcTaskQueue queue, SilcTask task);
-static int silc_schedule_task_timeout_compare(struct timeval *smaller, 
-                                             struct timeval *bigger);
 static void silc_task_del_by_context(SilcTaskQueue queue, void *context);
 static void silc_task_del_by_callback(SilcTaskQueue queue,
                                      SilcTaskCallback callback);
@@ -479,7 +477,7 @@ static void silc_schedule_dispatch_timeout(SilcSchedule schedule,
     while(1) {
       /* Execute the task if the timeout has expired */
       if (dispatch_all ||
-         silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
+         silc_compare_timeval(&task->timeout, &curtime)) {
         if (task->valid) {
          silc_mutex_unlock(queue->lock);
          SILC_SCHEDULE_UNLOCK(schedule);
@@ -533,7 +531,7 @@ static void silc_schedule_select_timeout(SilcSchedule schedule)
     if (task && task->valid == TRUE) {
       /* If the timeout is in past, we will run the task and all other
         timeout tasks from the past. */
-      if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
+      if (silc_compare_timeval(&task->timeout, &curtime)) {
        silc_schedule_dispatch_timeout(schedule, FALSE);
 
        /* The task(s) has expired and doesn't exist on the task queue
@@ -1086,7 +1084,7 @@ static SilcTask silc_task_get_first(SilcTaskQueue queue, SilcTask first)
     if (first == prev)
       break;
 
-    if (silc_schedule_task_timeout_compare(&prev->timeout, &task->timeout))
+    if (silc_compare_timeval(&prev->timeout, &task->timeout))
       task = prev;
 
     prev = prev->prev;
@@ -1119,13 +1117,11 @@ static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
 
       /* If we have longer timeout than with the task head of us
         we have found our spot. */
-      if (silc_schedule_task_timeout_compare(&prev->timeout, 
-                                            &newtask->timeout))
+      if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
        break;
 
       /* If we are equal size of timeout we will be after it. */
-      if (!silc_schedule_task_timeout_compare(&newtask->timeout, 
-                                             &prev->timeout))
+      if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
        break;
 
       /* We have shorter timeout, compare to next one. */
@@ -1140,11 +1136,9 @@ static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
     
     if (prev == task) {
       /* Check if we are going to be the first task in the queue */
-      if (silc_schedule_task_timeout_compare(&prev->timeout, 
-                                            &newtask->timeout))
+      if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
        break;
-      if (!silc_schedule_task_timeout_compare(&newtask->timeout, 
-                                             &prev->timeout))
+      if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
        break;
 
       /* We are now the first task in queue */
@@ -1158,13 +1152,11 @@ static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
 
       /* If we have longer timeout than with the task head of us
         we have found our spot. */
-      if (silc_schedule_task_timeout_compare(&prev->timeout, 
-                                            &newtask->timeout))
+      if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
        break;
 
       /* If we are equal size of timeout, priority kicks in place. */
-      if (!silc_schedule_task_timeout_compare(&newtask->timeout, 
-                                             &prev->timeout))
+      if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
        if (prev->priority >= SILC_TASK_PRI_NORMAL)
          break;
 
@@ -1180,11 +1172,9 @@ static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
     
     if (prev == task) {
       /* Check if we are going to be the first task in the queue */
-      if (silc_schedule_task_timeout_compare(&prev->timeout, 
-                                            &newtask->timeout))
+      if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
        break;
-      if (!silc_schedule_task_timeout_compare(&newtask->timeout, 
-                                             &prev->timeout))
+      if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
        if (prev->priority >= SILC_TASK_PRI_NORMAL)
          break;
 
@@ -1263,20 +1253,6 @@ static int silc_schedule_task_remove(SilcTaskQueue queue, SilcTask task)
   }
 }
 
-/* Compare two time values. If the first argument is smaller than the
-   second this function returns TRUE. */
-
-static int silc_schedule_task_timeout_compare(struct timeval *smaller, 
-                                             struct timeval *bigger)
-{
-  if ((smaller->tv_sec < bigger->tv_sec) ||
-      ((smaller->tv_sec == bigger->tv_sec) &&
-       (smaller->tv_usec < bigger->tv_usec)))
-    return TRUE;
-
-  return FALSE;
-}
-
 static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd)
 {
   SilcTask next;
index 25a56fe1aa658de5398f2502063c1da450cea65a..3ae1a1e8e0dbec5053f5dc2de51de5434ec9ad82 100644 (file)
@@ -75,7 +75,7 @@ void silc_socket_free(SilcSocketConnection sock)
       silc_schedule_task_del(sock->hb->schedule, sock->hb->hb_task);
       silc_free(sock->hb);
     }
-
+    silc_free(sock->qos);
     silc_free(sock->ip);
     silc_free(sock->hostname);
 
@@ -145,6 +145,38 @@ void silc_socket_set_heartbeat(SilcSocketConnection sock,
                                             SILC_TASK_PRI_LOW);
 }
 
+/* Sets a "Quality of Service" settings for socket connection `sock'.
+   The `read_rate' specifies the maximum read operations per second.
+   If more read operations are executed the limit will be applied for
+   the reading.  The `read_limit_bytes' specifies the maximum data
+   that is read.  It is guaranteed that silc_socket_read never returns
+   more that `read_limit_bytes' of data.  If more is read the limit
+   will be applied for the reading.  The `limit_sec' and `limit_usec'
+   specifies the limit that is applied if `read_rate' and/or 
+   `read_limit_bytes' is reached.  The `schedule' is the application's
+   scheduler. */
+
+void silc_socket_set_qos(SilcSocketConnection sock, 
+                        SilcUInt32 read_rate,
+                        SilcUInt32 read_limit_bytes,
+                        SilcUInt32 limit_sec,
+                        SilcUInt32 limit_usec,
+                        SilcSchedule schedule)
+{
+  if (!sock->qos) {
+    sock->qos = silc_calloc(1, sizeof(*sock->qos));
+    if (!sock->qos)
+      return;
+  }
+  sock->qos->read_rate = read_rate;
+  sock->qos->read_limit_bytes = read_limit_bytes;
+  sock->qos->limit_sec = limit_sec;
+  sock->qos->limit_usec = limit_usec;
+  sock->qos->schedule = schedule;
+  memset(&sock->qos->next_limit, 0, sizeof(sock->qos->next_limit));
+  sock->qos->cur_rate = 0;
+}
+
 /* Finishing timeout callback that will actually call the user specified
    host lookup callback. This is executed back in the calling thread and
    not in the lookup thread. */
index 6507e269f913833ee3a8ba88d7a68662f017ad0c..e8f3356cdae300e1c8ae6ea39c0f51461a8808c8 100644 (file)
@@ -67,6 +67,31 @@ typedef struct SilcSocketConnectionStruct *SilcSocketConnection;
  ***/
 typedef struct SilcSocketConnectionHBStruct *SilcSocketConnectionHB;
 
+/****s* silcutil/SilcSocketConnectionAPI/SilcSocketConnectionQos
+ *
+ * NAME
+ * 
+ *    typedef struct SilcSocketConnectionQosStruct *SilcSocketConnectionQos;
+ *
+ * DESCRIPTION
+ *
+ *    This structure is "Quality of Service" structure for the socket
+ *    connection and is set with silc_socket_set_qos function for a
+ *    socket context.
+ *
+ ***/
+typedef struct SilcSocketConnectionQosStruct {
+  SilcUInt16 read_limit_bytes;     /* Max read bytes */
+  SilcUInt16 read_rate;                    /* Max read rate/second */
+  SilcUInt16 limit_sec;                    /* Limit seconds */
+  SilcUInt32 limit_usec;           /* Limit microseconds */
+  SilcSchedule schedule;
+  struct timeval next_limit;
+  unsigned int cur_rate : 31;
+  unsigned int applied  : 1;
+  SilcUInt32 data_len;
+} *SilcSocketConnectionQos;
+
 /****d* silcutil/SilcSocketConnectionAPI/SilcSocketType
  *
  * NAME
@@ -183,6 +208,7 @@ struct SilcSocketConnectionStruct {
   int users;
 
   SilcSocketConnectionHB hb;
+  SilcSocketConnectionQos qos;
 
   SilcBuffer inbuf;
   SilcBuffer outbuf;
@@ -383,6 +409,38 @@ void silc_socket_set_heartbeat(SilcSocketConnection sock,
                               SilcSocketConnectionHBCb hb_callback,
                               SilcSchedule schedule);
 
+/****f* silcutil/SilcSocketConnectionAPI/silc_socket_set_qos
+ *
+ * SYNOPSIS
+ *
+ *    void silc_socket_set_qos(SilcSocketConnection sock, 
+ *                             SilcUInt32 read_rate,
+ *                             SilcUInt32 read_limit_bytes,
+ *                             SilcUInt32 limit_sec,
+ *                             SilcUInt32 limit_usec,
+ *                             SilcSchedule schedule)
+ *
+ * DESCRIPTION
+ *
+ *    Sets a "Quality of Service" settings for socket connection `sock'.
+ *    The `read_rate' specifies the maximum read operations per second.
+ *    If more read operations are executed the limit will be applied for
+ *    the reading.  The `read_limit_bytes' specifies the maximum data
+ *    that is read.  It is guaranteed that silc_socket_read never returns
+ *    more than `read_limit_bytes' of data.  If more is read the limit
+ *    will be applied for the reading.  The `limit_sec' and `limit_usec'
+ *    specifies the limit that is applied if `read_rate' and/or 
+ *    `read_limit_bytes' is reached.  The `schedule' is the application's
+ *    scheduler.
+ *
+ ***/
+void silc_socket_set_qos(SilcSocketConnection sock, 
+                        SilcUInt32 read_rate,
+                        SilcUInt32 read_limit_bytes,
+                        SilcUInt32 limit_sec,
+                        SilcUInt32 limit_usec,
+                        SilcSchedule schedule);
+
 /****f* silcutil/SilcSocketConnectionAPI/SilcSocketHostLookupCb
  *
  * SYNOPSIS
index da6da055f855969d0b67090f2a3d72eeb49b659d..618be0fca7218f556659e7c2a83f59f050ef77d0 100644 (file)
@@ -1140,3 +1140,16 @@ const char *silc_get_command_name(unsigned char command)
     return "UNKNOWN";
   return command_name[command];
 }
+
+/* Return TRUE if `smaller' is smaller than `bigger'. */
+
+bool silc_compare_timeval(struct timeval *smaller, 
+                         struct timeval *bigger)
+{
+  if ((smaller->tv_sec < bigger->tv_sec) ||
+      ((smaller->tv_sec == bigger->tv_sec) &&
+       (smaller->tv_usec < bigger->tv_usec)))
+    return TRUE;
+
+  return FALSE;
+}
index 28598bf2d3d65896b6a471932964b0cdf9db7f86..c06fa9c73a94f22d223812e6791fc344c8411c02 100644 (file)
@@ -471,13 +471,28 @@ char *silc_get_input(const char *prompt, bool echo_off);
  *
  * DESCRIPTION
  *
- *    Return current time to struct timeval.
- *
- *    This function is system dependant.
+ *    Return current time to struct timeval.  This function is system
+ *    dependant.  Returns 0 on success and -1 on error.
  *
  ***/
 int silc_gettimeofday(struct timeval *p);
 
+/****f* silcutil/SilcUtilAPI/silc_compare_timeval
+ *
+ * SYNOPSIS
+ *
+ *    bool silc_compare_timeval(struct timeval *smaller, 
+ *                              struct timeval *bigger)
+ *
+ * DESCRIPTION
+ *
+ *    Compare two timeval structures and return TRUE if the first
+ *    time value is smaller than the second time value.
+ *
+ ***/
+bool silc_compare_timeval(struct timeval *smaller, 
+                         struct timeval *bigger);
+
 /****f* silcutil/SilcUtilAPI/silc_string_regexify
  *
  * SYNOPSIS
index 02af7cc747646009ef560a7fc0db57570587e436..543fc833dc32a0b704ae2c518fbe2bf968c27a42 100644 (file)
@@ -65,6 +65,19 @@ int silc_socket_write(SilcSocketConnection sock)
   return ret;
 }
 
+/* QoS read handler, this will call the read and write events to indicate
+   that data is available again after a timeout */
+
+SILC_TASK_CALLBACK(silc_socket_read_qos)
+{
+  SilcSocketConnection sock = context;
+  sock->qos->applied = TRUE;
+  silc_schedule_set_listen_fd(sock->qos->schedule, sock->sock,
+                             (SILC_TASK_READ | SILC_TASK_WRITE), TRUE);
+  sock->qos->applied = FALSE;
+  silc_socket_free(sock);
+}
+
 /* Reads data from the socket connection into the incoming data buffer.
    It reads as much as possible from the socket connection. This returns
    amount of bytes read or -1 on error or -2 on case where all of the
@@ -79,6 +92,43 @@ int silc_socket_read(SilcSocketConnection sock)
   if (SILC_IS_DISABLED(sock))
     return -1;
 
+  /* If QoS was applied to socket then return earlier read data but apply
+     QoS to it too, if necessary. */
+  if (sock->qos) {
+    if (sock->qos->applied) {
+      if (sock->qos->data_len) {
+       /* Pull hidden data since we have it from earlier QoS apply */
+       silc_buffer_pull_tail(sock->inbuf, sock->qos->data_len);
+       len = sock->qos->data_len;
+       sock->qos->data_len = 0;
+      }
+
+      if (sock->inbuf->len - len > sock->qos->read_limit_bytes) {
+       /* Seems we need to apply QoS for the remaining data as well */
+       silc_schedule_task_add(sock->qos->schedule, sock->sock,
+                              silc_socket_read_qos, silc_socket_dup(sock),
+                              sock->qos->limit_sec, sock->qos->limit_usec,
+                              SILC_TASK_TIMEOUT, SILC_TASK_PRI_LOW);
+       silc_schedule_unset_listen_fd(sock->qos->schedule, sock->sock);
+      
+       /* Hide the rest of the data from the buffer. */
+       sock->qos->data_len = (sock->inbuf->len - len - 
+                              sock->qos->read_limit_bytes);
+       silc_buffer_push_tail(sock->inbuf, sock->qos->data_len);
+      }
+
+      if (sock->inbuf->len)
+       return sock->inbuf->len;
+    }
+
+    /* If we were called and we have active QoS data pending, return
+       with no data */
+    if (sock->qos->data_len) {
+      silc_schedule_unset_listen_fd(sock->qos->schedule, sock->sock);
+      return -2;
+    }
+  }
+
   SILC_LOG_DEBUG(("Reading data from socket %d", fd));
 
   /* Read the data from the socket. */
@@ -110,6 +160,55 @@ int silc_socket_read(SilcSocketConnection sock)
 
   SILC_LOG_DEBUG(("Read %d bytes", len));
 
+  /* Apply QoS to the read data if necessary */
+  if (sock->qos) {
+    struct timeval curtime;
+    silc_gettimeofday(&curtime);
+
+    /* If we have passed the rate time limit, set our new time limit,
+       and zero the rate limit. */
+    if (!silc_compare_timeval(&curtime, &sock->qos->next_limit)) {
+      curtime.tv_sec++;
+      sock->qos->next_limit = curtime;
+      sock->qos->cur_rate = 0;
+    }
+    sock->qos->cur_rate++;
+
+    /* If we are not within the rate limit apply QoS for the read data */
+    if (sock->qos->cur_rate > sock->qos->read_rate) {
+      silc_schedule_task_add(sock->qos->schedule, sock->sock,
+                            silc_socket_read_qos, silc_socket_dup(sock),
+                            sock->qos->limit_sec, sock->qos->limit_usec,
+                            SILC_TASK_TIMEOUT, SILC_TASK_PRI_LOW);
+      silc_schedule_unset_listen_fd(sock->qos->schedule, sock->sock);
+
+      /* Check the byte limit as well, and do not return more than allowed */
+      if (sock->inbuf->len > sock->qos->read_limit_bytes) {
+       /* Hide the rest of the data from the buffer. */
+       sock->qos->data_len = sock->inbuf->len - sock->qos->read_limit_bytes;
+       silc_buffer_push_tail(sock->inbuf, sock->qos->data_len);
+       len = sock->inbuf->len;
+      } else {
+       /* Rate limit kicked in, do not return data yet */
+       return -2;
+      }
+    } else {
+      /* Check the byte limit, and do not return more than allowed */
+      if (sock->inbuf->len > sock->qos->read_limit_bytes) {
+       silc_schedule_task_add(sock->qos->schedule, sock->sock,
+                              silc_socket_read_qos, silc_socket_dup(sock),
+                              sock->qos->limit_sec, sock->qos->limit_usec,
+                              SILC_TASK_TIMEOUT, SILC_TASK_PRI_LOW);
+       silc_schedule_unset_listen_fd(sock->qos->schedule, sock->sock);
+
+       /* Hide the rest of the data from the buffer. */
+       sock->qos->data_len = sock->inbuf->len - sock->qos->read_limit_bytes;
+       silc_buffer_push_tail(sock->inbuf, sock->qos->data_len);
+       len = sock->inbuf->len;
+      }
+    }
+  }
+
   return len;
 }