should be FALSE. Affected file lib/silcutil/silcschedule.[ch],
lib/silcclient/client_internal.h and silcd/server_internal.h.
+	* Added silc_compare_timeval to determine which of two time
+	  values is smaller. Affected file lib/silcutil/silcutil.[ch].
+
+ * Added sort-of "Quality of Service" (QoS) support to
+ SilcSocketConnection. Data reception can be controlled with
+ rate limit per second and/or read data length limit.
+ Added silc_socket_set_qos. Affected files are
+ lib/silcutil/silcsockconn.[ch] and unix/silcunixsockconn.c.
+
+ * Added the QoS configuration to SILC Server configuration
+ file. Affected files are silcd/serverconfig.[ch] and
+ server.c.
+
Sun Oct 20 14:12:24 CEST 2002 Pekka Riikonen <priikone@silcnet.org>
* Merged with irssi.org CVS.
o Add special handling in router and server for "connection
timed out" error. Be optimistic.
- o Testing
-
- o Add a timeout to handling incoming JOIN commands. It should be
- enforced that JOIN command is executed only once in a second or two
- seconds. Now it is possible to accept n incoming JOIN commands
- and process them without any timeouts. THis must be employed because
- each JOIN command will create and distribute the new channel key
- to everybody on the channel (Fix this to 0.9.x).
-
o Manual file for silcd(8) and silcd.conf.
o Testing
SILC currently supports SOCKS4 and SOCKS5 but it needs to be compiled
in separately.
+ o Add a timeout to handling incoming JOIN commands. It should be
+ enforced that JOIN command is executed only once in a second or two
+ seconds. Now it is possible to accept n incoming JOIN commands
+      and process them without any timeouts. This must be employed because
+ each JOIN command will create and distribute the new channel key
+ to everybody on the channel (Fix this to 0.9.x).
+
o EPOC specific additions/changes required:
o lib/silcutil/epoc routines missing or not completed.
if (conn && conn->param)
param = conn->param;
- /* Perform keepalive. The `hb_context' will be freed automatically
- when finally calling the silc_socket_free function. */
+ /* Perform keepalive. */
silc_socket_set_heartbeat(sock, param->keepalive_secs, server,
silc_server_perform_heartbeat,
server->schedule);
SilcSocketConnection sock = ctx->sock;
SilcUnknownEntry entry = (SilcUnknownEntry)sock->user_data;
void *id_entry;
- SilcUInt32 hearbeat_timeout = server->config->param.keepalive_secs;
+ SilcServerConfigConnParams *param = &server->config->param;
if (protocol->state == SILC_PROTOCOL_STATE_ERROR ||
protocol->state == SILC_PROTOCOL_STATE_FAILURE) {
/* Get connection parameters */
if (conn->param) {
- if (conn->param->keepalive_secs)
- hearbeat_timeout = conn->param->keepalive_secs;
+ param = conn->param;
+
+ if (!param->keepalive_secs)
+ param->keepalive_secs = server->config->param.keepalive_secs;
+
+ if (!param->qos && server->config->param.qos) {
+ param->qos = server->config->param.qos;
+ param->qos_rate_limit = server->config->param.qos_rate_limit;
+ param->qos_bytes_limit = server->config->param.qos_bytes_limit;
+ param->qos_limit_sec = server->config->param.qos_limit_sec;
+ param->qos_limit_usec = server->config->param.qos_limit_usec;
+ }
/* Check if to be anonymous connection */
- if (conn->param->anonymous)
+ if (param->anonymous)
client->mode |= SILC_UMODE_ANONYMOUS;
}
if (rconn) {
if (rconn->param) {
- if (rconn->param->keepalive_secs)
- hearbeat_timeout = rconn->param->keepalive_secs;
+ param = rconn->param;
+
+ if (!param->keepalive_secs)
+ param->keepalive_secs = server->config->param.keepalive_secs;
+
+ if (!param->qos && server->config->param.qos) {
+ param->qos = server->config->param.qos;
+ param->qos_rate_limit = server->config->param.qos_rate_limit;
+ param->qos_bytes_limit = server->config->param.qos_bytes_limit;
+ param->qos_limit_sec = server->config->param.qos_limit_sec;
+ param->qos_limit_usec = server->config->param.qos_limit_usec;
+ }
}
initiator = rconn->initiator;
}
if (sconn) {
if (sconn->param) {
- if (sconn->param->keepalive_secs)
- hearbeat_timeout = sconn->param->keepalive_secs;
+      param = sconn->param;
+
+ if (!param->keepalive_secs)
+ param->keepalive_secs = server->config->param.keepalive_secs;
+
+ if (!param->qos && server->config->param.qos) {
+ param->qos = server->config->param.qos;
+ param->qos_rate_limit = server->config->param.qos_rate_limit;
+ param->qos_bytes_limit = server->config->param.qos_bytes_limit;
+ param->qos_limit_sec = server->config->param.qos_limit_sec;
+ param->qos_limit_usec = server->config->param.qos_limit_usec;
+ }
}
backup_router = sconn->backup_router;
/* Connection has been fully established now. Everything is ok. */
SILC_LOG_DEBUG(("New connection authenticated"));
- /* Perform keepalive. The `hb_context' will be freed automatically
- when finally calling the silc_socket_free function. */
- silc_socket_set_heartbeat(sock, hearbeat_timeout, server,
- silc_server_perform_heartbeat,
- server->schedule);
+ /* Perform keepalive. */
+ if (param->keepalive_secs)
+ silc_socket_set_heartbeat(sock, param->keepalive_secs, server,
+ silc_server_perform_heartbeat,
+ server->schedule);
+
+ /* Perform Quality of Service */
+ if (param->qos)
+ silc_socket_set_qos(sock, param->qos_rate_limit, param->qos_bytes_limit,
+ param->qos_limit_sec, param->qos_limit_usec,
+ server->schedule);
out:
silc_schedule_task_del_by_callback(server->schedule,
else if (!strcmp(name, "detach_timeout")) {
config->detach_timeout = (SilcUInt32) *(int *)val;
}
+ else if (!strcmp(name, "qos")) {
+ config->param.qos = *(bool *)val;
+ }
+ else if (!strcmp(name, "qos_rate_limit")) {
+ config->param.qos_rate_limit = *(SilcUInt32 *)val;
+ }
+ else if (!strcmp(name, "qos_bytes_limit")) {
+ config->param.qos_bytes_limit = *(SilcUInt32 *)val;
+ }
+ else if (!strcmp(name, "qos_limit_sec")) {
+ config->param.qos_limit_sec = *(SilcUInt32 *)val;
+ }
+ else if (!strcmp(name, "qos_limit_usec")) {
+ config->param.qos_limit_usec = *(SilcUInt32 *)val;
+ }
else
return SILC_CONFIG_EINTERNAL;
else if (!strcmp(name, "anonymous")) {
tmp->anonymous = *(bool *)val;
}
+ else if (!strcmp(name, "qos")) {
+ tmp->qos = *(bool *)val;
+ }
+ else if (!strcmp(name, "qos_rate_limit")) {
+ tmp->qos_rate_limit = *(SilcUInt32 *)val;
+ }
+ else if (!strcmp(name, "qos_bytes_limit")) {
+ tmp->qos_bytes_limit = *(SilcUInt32 *)val;
+ }
+ else if (!strcmp(name, "qos_limit_sec")) {
+ tmp->qos_limit_sec = *(SilcUInt32 *)val;
+ }
+ else if (!strcmp(name, "qos_limit_usec")) {
+ tmp->qos_limit_usec = *(SilcUInt32 *)val;
+ }
else
return SILC_CONFIG_EINTERNAL;
{ "version_software_vendor", SILC_CONFIG_ARG_STR, fetch_generic, NULL },
{ "detach_disabled", SILC_CONFIG_ARG_TOGGLE, fetch_generic, NULL },
{ "detach_timeout", SILC_CONFIG_ARG_INT, fetch_generic, NULL },
+ { "qos", SILC_CONFIG_ARG_TOGGLE, fetch_generic, NULL },
+ { "qos_rate_limit", SILC_CONFIG_ARG_INT, fetch_generic, NULL },
+ { "qos_bytes_limit", SILC_CONFIG_ARG_INT, fetch_generic, NULL },
+ { "qos_limit_sec", SILC_CONFIG_ARG_INT, fetch_generic, NULL },
+ { "qos_limit_usec", SILC_CONFIG_ARG_INT, fetch_generic, NULL },
{ 0, 0, 0, 0 }
};
{ "version_software", SILC_CONFIG_ARG_STR, fetch_connparam, NULL },
{ "version_software_vendor", SILC_CONFIG_ARG_STR, fetch_connparam, NULL },
{ "anonymous", SILC_CONFIG_ARG_TOGGLE, fetch_connparam, NULL },
+  { "qos", SILC_CONFIG_ARG_TOGGLE, fetch_connparam, NULL },
+  { "qos_rate_limit", SILC_CONFIG_ARG_INT, fetch_connparam, NULL },
+  { "qos_bytes_limit", SILC_CONFIG_ARG_INT, fetch_connparam, NULL },
+  { "qos_limit_sec", SILC_CONFIG_ARG_INT, fetch_connparam, NULL },
+  { "qos_limit_usec", SILC_CONFIG_ARG_INT, fetch_connparam, NULL },
{ 0, 0, 0, 0 }
};
/* Connection parameters */
typedef struct SilcServerConfigConnParams {
+ struct SilcServerConfigConnParams *next;
char *name;
char *version_protocol;
char *version_software;
SilcUInt32 reconnect_interval;
SilcUInt32 reconnect_interval_max;
SilcUInt32 key_exchange_rekey;
- bool key_exchange_pfs;
- bool reconnect_keep_trying;
- bool anonymous;
- struct SilcServerConfigConnParams *next;
+ SilcUInt32 qos_rate_limit;
+ SilcUInt32 qos_bytes_limit;
+ SilcUInt32 qos_limit_sec;
+ SilcUInt32 qos_limit_usec;
+ unsigned int key_exchange_pfs : 1;
+ unsigned int reconnect_keep_trying : 1;
+ unsigned int anonymous : 1;
+ unsigned int qos : 1;
} SilcServerConfigConnParams;
/* Holds all client authentication data from config file */
# sessions are persistent as long as server is running.
#detach_disabled = true;
#detach_timeout = 1440;
+
+ # Quality of Service (QoS) settings. The QoS can be used to handle
+ # the incoming data and limit its handling rate to avoid flooding.
+ # By default QoS is disabled and can be enabled by setting "qos" to
+  # true value. The "qos_rate_limit" is the incoming data reading
+ # per second, and if more frequently than the set limit is read the
+ # QoS is applied to the data. The "qos_bytes_limit" is maximum bytes
+ # allowed for incoming data. If more is received at once the QoS
+ # is applied to the data. The "qos_limit_sec" and "qos_limit_usec"
+ # is the timeout used to delay the data handling, seconds and
+ # microseconds, respectively. This can be overridden with
+ # ConnectionParams.
+ #qos = true;
+ #qos_rate_limit = 5;
+ #qos_bytes_limit = 1024;
+ #qos_limit_sec = 0;
+ #qos_limit_usec = 500000;
};
#
# is scrambled and anonymous mode is set for the user.
#anonymous = true;
- #TODO:
- #connections_interval - incoming connection interval limit ?
+ # Quality of Service (QoS) settings. The QoS can be used to handle
+ # the incoming data and limit its handling rate to avoid flooding.
+ # By default QoS is disabled and can be enabled by setting "qos" to
+  # true value. The "qos_rate_limit" is the incoming data reading
+ # per second, and if more frequently than the set limit is read the
+ # QoS is applied to the data. The "qos_bytes_limit" is maximum bytes
+ # allowed for incoming data. If more is received at once the QoS
+ # is applied to the data. The "qos_limit_sec" and "qos_limit_usec"
+ # is the timeout used to delay the data handling, seconds and
+ # microseconds, respectively. For server connections QoS SHOULD NOT
+ # be set.
+ #qos = true;
+ #qos_rate_limit = 5;
+ #qos_bytes_limit = 1024;
+ #qos_limit_sec = 0;
+ #qos_limit_usec = 500000;
};
#
static SilcTask silc_task_add_timeout(SilcTaskQueue queue, SilcTask newtask,
SilcTaskPriority priority);
static int silc_schedule_task_remove(SilcTaskQueue queue, SilcTask task);
-static int silc_schedule_task_timeout_compare(struct timeval *smaller,
- struct timeval *bigger);
static void silc_task_del_by_context(SilcTaskQueue queue, void *context);
static void silc_task_del_by_callback(SilcTaskQueue queue,
SilcTaskCallback callback);
while(1) {
/* Execute the task if the timeout has expired */
if (dispatch_all ||
- silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
+ silc_compare_timeval(&task->timeout, &curtime)) {
if (task->valid) {
silc_mutex_unlock(queue->lock);
SILC_SCHEDULE_UNLOCK(schedule);
if (task && task->valid == TRUE) {
/* If the timeout is in past, we will run the task and all other
timeout tasks from the past. */
- if (silc_schedule_task_timeout_compare(&task->timeout, &curtime)) {
+ if (silc_compare_timeval(&task->timeout, &curtime)) {
silc_schedule_dispatch_timeout(schedule, FALSE);
/* The task(s) has expired and doesn't exist on the task queue
if (first == prev)
break;
- if (silc_schedule_task_timeout_compare(&prev->timeout, &task->timeout))
+ if (silc_compare_timeval(&prev->timeout, &task->timeout))
task = prev;
prev = prev->prev;
/* If we have longer timeout than with the task head of us
we have found our spot. */
- if (silc_schedule_task_timeout_compare(&prev->timeout,
- &newtask->timeout))
+ if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
break;
/* If we are equal size of timeout we will be after it. */
- if (!silc_schedule_task_timeout_compare(&newtask->timeout,
- &prev->timeout))
+ if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
break;
/* We have shorter timeout, compare to next one. */
if (prev == task) {
/* Check if we are going to be the first task in the queue */
- if (silc_schedule_task_timeout_compare(&prev->timeout,
- &newtask->timeout))
+ if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
break;
- if (!silc_schedule_task_timeout_compare(&newtask->timeout,
- &prev->timeout))
+ if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
break;
/* We are now the first task in queue */
/* If we have longer timeout than with the task head of us
we have found our spot. */
- if (silc_schedule_task_timeout_compare(&prev->timeout,
- &newtask->timeout))
+ if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
break;
/* If we are equal size of timeout, priority kicks in place. */
- if (!silc_schedule_task_timeout_compare(&newtask->timeout,
- &prev->timeout))
+ if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
if (prev->priority >= SILC_TASK_PRI_NORMAL)
break;
if (prev == task) {
/* Check if we are going to be the first task in the queue */
- if (silc_schedule_task_timeout_compare(&prev->timeout,
- &newtask->timeout))
+ if (silc_compare_timeval(&prev->timeout, &newtask->timeout))
break;
- if (!silc_schedule_task_timeout_compare(&newtask->timeout,
- &prev->timeout))
+ if (!silc_compare_timeval(&newtask->timeout, &prev->timeout))
if (prev->priority >= SILC_TASK_PRI_NORMAL)
break;
}
}
-/* Compare two time values. If the first argument is smaller than the
- second this function returns TRUE. */
-
-static int silc_schedule_task_timeout_compare(struct timeval *smaller,
- struct timeval *bigger)
-{
- if ((smaller->tv_sec < bigger->tv_sec) ||
- ((smaller->tv_sec == bigger->tv_sec) &&
- (smaller->tv_usec < bigger->tv_usec)))
- return TRUE;
-
- return FALSE;
-}
-
static void silc_task_del_by_fd(SilcTaskQueue queue, SilcUInt32 fd)
{
SilcTask next;
silc_schedule_task_del(sock->hb->schedule, sock->hb->hb_task);
silc_free(sock->hb);
}
-
+ silc_free(sock->qos);
silc_free(sock->ip);
silc_free(sock->hostname);
SILC_TASK_PRI_LOW);
}
+/* Sets a "Quality of Service" settings for socket connection `sock'.
+ The `read_rate' specifies the maximum read operations per second.
+ If more read operations are executed the limit will be applied for
+ the reading. The `read_limit_bytes' specifies the maximum data
+ that is read. It is guaranteed that silc_socket_read never returns
+   more than `read_limit_bytes' of data. If more is read the limit
+ will be applied for the reading. The `limit_sec' and `limit_usec'
+ specifies the limit that is applied if `read_rate' and/or
+ `read_limit_bytes' is reached. The `schedule' is the application's
+ scheduler. */
+
+void silc_socket_set_qos(SilcSocketConnection sock,
+ SilcUInt32 read_rate,
+ SilcUInt32 read_limit_bytes,
+ SilcUInt32 limit_sec,
+ SilcUInt32 limit_usec,
+ SilcSchedule schedule)
+{
+ if (!sock->qos) {
+ sock->qos = silc_calloc(1, sizeof(*sock->qos));
+ if (!sock->qos)
+ return;
+ }
+ sock->qos->read_rate = read_rate;
+ sock->qos->read_limit_bytes = read_limit_bytes;
+ sock->qos->limit_sec = limit_sec;
+ sock->qos->limit_usec = limit_usec;
+ sock->qos->schedule = schedule;
+ memset(&sock->qos->next_limit, 0, sizeof(sock->qos->next_limit));
+ sock->qos->cur_rate = 0;
+}
+
/* Finishing timeout callback that will actually call the user specified
host lookup callback. This is executed back in the calling thread and
not in the lookup thread. */
***/
typedef struct SilcSocketConnectionHBStruct *SilcSocketConnectionHB;
+/****s* silcutil/SilcSocketConnectionAPI/SilcSocketConnectionQos
+ *
+ * NAME
+ *
+ * typedef struct SilcSocketConnectionQosStruct *SilcSocketConnectionQos;
+ *
+ * DESCRIPTION
+ *
+ * This structure is "Quality of Service" structure for the socket
+ * connection and is set with silc_socket_set_qos function for a
+ * socket context.
+ *
+ ***/
+typedef struct SilcSocketConnectionQosStruct {
+ SilcUInt16 read_limit_bytes; /* Max read bytes */
+ SilcUInt16 read_rate; /* Max read rate/second */
+ SilcUInt16 limit_sec; /* Limit seconds */
+ SilcUInt32 limit_usec; /* Limit microseconds */
+ SilcSchedule schedule;
+ struct timeval next_limit;
+ unsigned int cur_rate : 31;
+ unsigned int applied : 1;
+ SilcUInt32 data_len;
+} *SilcSocketConnectionQos;
+
/****d* silcutil/SilcSocketConnectionAPI/SilcSocketType
*
* NAME
int users;
SilcSocketConnectionHB hb;
+ SilcSocketConnectionQos qos;
SilcBuffer inbuf;
SilcBuffer outbuf;
SilcSocketConnectionHBCb hb_callback,
SilcSchedule schedule);
+/****f* silcutil/SilcSocketConnectionAPI/silc_socket_set_qos
+ *
+ * SYNOPSIS
+ *
+ * void silc_socket_set_qos(SilcSocketConnection sock,
+ * SilcUInt32 read_rate,
+ * SilcUInt32 read_limit_bytes,
+ * SilcUInt32 limit_sec,
+ * SilcUInt32 limit_usec,
+ * SilcSchedule schedule)
+ *
+ * DESCRIPTION
+ *
+ * Sets a "Quality of Service" settings for socket connection `sock'.
+ * The `read_rate' specifies the maximum read operations per second.
+ * If more read operations are executed the limit will be applied for
+ * the reading. The `read_limit_bytes' specifies the maximum data
+ * that is read. It is guaranteed that silc_socket_read never returns
+ *    more than `read_limit_bytes' of data. If more is read the limit
+ * will be applied for the reading. The `limit_sec' and `limit_usec'
+ * specifies the limit that is applied if `read_rate' and/or
+ * `read_limit_bytes' is reached. The `schedule' is the application's
+ * scheduler.
+ *
+ ***/
+void silc_socket_set_qos(SilcSocketConnection sock,
+ SilcUInt32 read_rate,
+ SilcUInt32 read_limit_bytes,
+ SilcUInt32 limit_sec,
+ SilcUInt32 limit_usec,
+ SilcSchedule schedule);
+
/****f* silcutil/SilcSocketConnectionAPI/SilcSocketHostLookupCb
*
* SYNOPSIS
return "UNKNOWN";
return command_name[command];
}
+
+/* Return TRUE if `smaller' is smaller than `bigger'. */
+
+bool silc_compare_timeval(struct timeval *smaller,
+ struct timeval *bigger)
+{
+ if ((smaller->tv_sec < bigger->tv_sec) ||
+ ((smaller->tv_sec == bigger->tv_sec) &&
+ (smaller->tv_usec < bigger->tv_usec)))
+ return TRUE;
+
+ return FALSE;
+}
*
* DESCRIPTION
*
- * Return current time to struct timeval.
- *
- * This function is system dependant.
+ * Return current time to struct timeval. This function is system
+ * dependant. Returns 0 on success and -1 on error.
*
***/
int silc_gettimeofday(struct timeval *p);
+/****f* silcutil/SilcUtilAPI/silc_compare_timeval
+ *
+ * SYNOPSIS
+ *
+ * bool silc_compare_timeval(struct timeval *smaller,
+ * struct timeval *bigger)
+ *
+ * DESCRIPTION
+ *
+ * Compare two timeval structures and return TRUE if the first
+ * time value is smaller than the second time value.
+ *
+ ***/
+bool silc_compare_timeval(struct timeval *smaller,
+ struct timeval *bigger);
+
/****f* silcutil/SilcUtilAPI/silc_string_regexify
*
* SYNOPSIS
return ret;
}
+/* QoS read handler, this will call the read and write events to indicate
+ that data is available again after a timeout */
+
+SILC_TASK_CALLBACK(silc_socket_read_qos)
+{
+ SilcSocketConnection sock = context;
+ sock->qos->applied = TRUE;
+ silc_schedule_set_listen_fd(sock->qos->schedule, sock->sock,
+ (SILC_TASK_READ | SILC_TASK_WRITE), TRUE);
+ sock->qos->applied = FALSE;
+ silc_socket_free(sock);
+}
+
/* Reads data from the socket connection into the incoming data buffer.
It reads as much as possible from the socket connection. This returns
amount of bytes read or -1 on error or -2 on case where all of the
if (SILC_IS_DISABLED(sock))
return -1;
+ /* If QoS was applied to socket then return earlier read data but apply
+ QoS to it too, if necessary. */
+ if (sock->qos) {
+ if (sock->qos->applied) {
+ if (sock->qos->data_len) {
+ /* Pull hidden data since we have it from earlier QoS apply */
+ silc_buffer_pull_tail(sock->inbuf, sock->qos->data_len);
+ len = sock->qos->data_len;
+ sock->qos->data_len = 0;
+ }
+
+ if (sock->inbuf->len - len > sock->qos->read_limit_bytes) {
+ /* Seems we need to apply QoS for the remaining data as well */
+ silc_schedule_task_add(sock->qos->schedule, sock->sock,
+ silc_socket_read_qos, silc_socket_dup(sock),
+ sock->qos->limit_sec, sock->qos->limit_usec,
+ SILC_TASK_TIMEOUT, SILC_TASK_PRI_LOW);
+ silc_schedule_unset_listen_fd(sock->qos->schedule, sock->sock);
+
+ /* Hide the rest of the data from the buffer. */
+ sock->qos->data_len = (sock->inbuf->len - len -
+ sock->qos->read_limit_bytes);
+ silc_buffer_push_tail(sock->inbuf, sock->qos->data_len);
+ }
+
+ if (sock->inbuf->len)
+ return sock->inbuf->len;
+ }
+
+ /* If we were called and we have active QoS data pending, return
+ with no data */
+ if (sock->qos->data_len) {
+ silc_schedule_unset_listen_fd(sock->qos->schedule, sock->sock);
+ return -2;
+ }
+ }
+
SILC_LOG_DEBUG(("Reading data from socket %d", fd));
/* Read the data from the socket. */
SILC_LOG_DEBUG(("Read %d bytes", len));
+ /* Apply QoS to the read data if necessary */
+ if (sock->qos) {
+ struct timeval curtime;
+ silc_gettimeofday(&curtime);
+
+ /* If we have passed the rate time limit, set our new time limit,
+ and zero the rate limit. */
+ if (!silc_compare_timeval(&curtime, &sock->qos->next_limit)) {
+ curtime.tv_sec++;
+ sock->qos->next_limit = curtime;
+ sock->qos->cur_rate = 0;
+ }
+ sock->qos->cur_rate++;
+
+    /* If we are not within rate limit apply QoS for the read data */
+ if (sock->qos->cur_rate > sock->qos->read_rate) {
+ silc_schedule_task_add(sock->qos->schedule, sock->sock,
+ silc_socket_read_qos, silc_socket_dup(sock),
+ sock->qos->limit_sec, sock->qos->limit_usec,
+ SILC_TASK_TIMEOUT, SILC_TASK_PRI_LOW);
+ silc_schedule_unset_listen_fd(sock->qos->schedule, sock->sock);
+
+ /* Check the byte limit as well, and do not return more than allowed */
+ if (sock->inbuf->len > sock->qos->read_limit_bytes) {
+ /* Hide the rest of the data from the buffer. */
+ sock->qos->data_len = sock->inbuf->len - sock->qos->read_limit_bytes;
+ silc_buffer_push_tail(sock->inbuf, sock->qos->data_len);
+ len = sock->inbuf->len;
+ } else {
+ /* Rate limit kicked in, do not return data yet */
+ return -2;
+ }
+ } else {
+ /* Check the byte limit, and do not return more than allowed */
+ if (sock->inbuf->len > sock->qos->read_limit_bytes) {
+ silc_schedule_task_add(sock->qos->schedule, sock->sock,
+ silc_socket_read_qos, silc_socket_dup(sock),
+ sock->qos->limit_sec, sock->qos->limit_usec,
+ SILC_TASK_TIMEOUT, SILC_TASK_PRI_LOW);
+ silc_schedule_unset_listen_fd(sock->qos->schedule, sock->sock);
+
+ /* Hide the rest of the data from the buffer. */
+ sock->qos->data_len = sock->inbuf->len - sock->qos->read_limit_bytes;
+ silc_buffer_push_tail(sock->inbuf, sock->qos->data_len);
+ len = sock->inbuf->len;
+ }
+ }
+ }
+
return len;
}