silc_socket_set_qos with NULL arguments resets the QoS.
+Fri Oct 31 12:33:59 EET 2003 Pekka Riikonen <priikone@silcnet.org>
+
+	* Optimized the socket referencing in packet routines, client
+	  library and server; see the sketch after this entry. Affected
+	  files are lib/silccore/silcpacket.c, lib/silcclient/client.c
+	  and silcd/server.c.
+
+	* If silc_socket_set_qos is called with NULL arguments, the QoS is
+	  reset from the socket. Affected files are
+	  lib/silcutil/silcsockconn.[ch].
+
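The referencing pattern after this change, as a minimal sketch: the
processing loop takes one socket reference up front instead of one per
parsed packet, and releases it on every exit path. In the sketch,
process_incoming(), have_more_data() and parse_one() are hypothetical
stand-ins for the real parsing code; silc_socket_dup() and
silc_socket_free() are the real reference counting calls.

	static bool process_incoming(SilcSocketConnection sock)
	{
	  silc_socket_dup(sock);		/* one reference for the loop */

	  while (have_more_data(sock)) {	/* hypothetical helper */
	    if (!parse_one(sock)) {		/* hypothetical helper */
	      silc_socket_free(sock);		/* release on error exit */
	      return FALSE;
	    }
	  }

	  silc_socket_free(sock);		/* release on normal exit */
	  return TRUE;
	}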
Thu Oct 30 21:23:24 CET 2003 Jochen Eisinger <jochen@penguin-breeder.org>
* Add a /LISTKEYS command which lists the locally cached
- client and server keys. Affected files
+ client and server keys. Affected files
irssi/src/fe-common/module-formats.[ch],
irssi/src/silc/core/silc-{channels,servers}.c
irssi/docs/help/in/listkeys.in
out:
silc_packet_context_free(packet);
- silc_socket_free(parse_ctx->sock);
silc_free(parse_ctx);
}
SilcIDListData idata = (SilcIDListData)sock->user_data;
bool ret;
- if (SILC_IS_DISCONNECTING(sock) || SILC_IS_DISCONNECTED(sock)) {
- SILC_LOG_DEBUG(("Connection is disconnected"));
- silc_socket_free(parser_context->sock);
- silc_free(parser_context);
- return FALSE;
- }
-
if (idata)
idata->psn_receive = parser_context->packet->sequence + 1;
if (sock->protocol && sock->protocol->protocol &&
(sock->protocol->protocol->type == SILC_PROTOCOL_SERVER_KEY_EXCHANGE ||
sock->protocol->protocol->type == SILC_PROTOCOL_SERVER_REKEY)) {
- silc_socket_dup(sock);
silc_server_packet_parse_real(server->schedule, server, 0, sock->sock,
parser_context);
if (SILC_IS_DISCONNECTING(sock) || SILC_IS_DISCONNECTED(sock)) {
SILC_LOG_DEBUG(("Connection is disconnected"));
- silc_socket_free(sock);
return FALSE;
}
- silc_socket_free(sock);
/* Reprocess data since we'll return FALSE here. This is because
the idata->receive_key might have become valid in the last packet
"Router"), tmp[0] ? tmp : ""));
SILC_SET_DISCONNECTED(sock);
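+  /* Remove any QoS from the socket; NULL arguments reset the QoS and
+     cancel pending QoS tasks. */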
+ silc_socket_set_qos(sock, 0, 0, 0, 0, NULL);
silc_schedule_task_add(server->schedule, sock->sock,
silc_server_close_connection_final,
(void *)sock, 0, 1, SILC_TASK_TIMEOUT,
if (ret == SILC_PACKET_NONE) {
silc_packet_context_free(packet);
- silc_socket_free(parser_context->sock);
silc_free(parser_context);
return FALSE;
}
silc_client_packet_parse, client);
silc_packet_context_free(packet);
- silc_socket_free(parser_context->sock);
silc_free(parser_context);
return FALSE;
/* Parse the incoming packet type */
silc_client_packet_parse_type(client, sock, packet);
silc_packet_context_free(packet);
- silc_socket_free(parser_context->sock);
silc_free(parser_context);
return TRUE;
}
mac_len = silc_hmac_len(hmac);
/* Parse the packets from the data */
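+  /* Hold one reference to the socket for the duration of the
+     processing loop; every return path below releases it. */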
+ silc_socket_dup(sock);
while (sock->inbuf->len > 0 && cont) {
if (sock->inbuf->len < SILC_PACKET_MIN_HEADER_LEN) {
SILC_LOG_DEBUG(("Partial packet in queue, waiting for the rest"));
+ silc_socket_free(sock);
return TRUE;
}
SILC_LOG_ERROR(("Received too short packet"));
memset(header, 0, sizeof(header));
silc_buffer_clear(sock->inbuf);
+ silc_socket_free(sock);
return FALSE;
}
"(%d bytes)", paddedlen + mac_len - sock->inbuf->len));
SILC_SET_INBUF_PENDING(sock);
memset(tmp, 0, sizeof(tmp));
+ silc_socket_free(sock);
return TRUE;
}
sock->protocol ? sock->protocol->protocol->type : -1));
memset(tmp, 0, sizeof(tmp));
silc_buffer_clear(sock->inbuf);
+ silc_socket_free(sock);
return FALSE;
}
SILC_UNSET_INBUF_PENDING(sock);
parse_ctx = silc_calloc(1, sizeof(*parse_ctx));
- if (!parse_ctx)
+ if (!parse_ctx) {
+ silc_socket_free(sock);
return FALSE;
+ }
parse_ctx->packet = silc_packet_context_alloc();
parse_ctx->packet->buffer = silc_buffer_alloc_size(paddedlen);
parse_ctx->packet->type = (SilcPacketType)header[3];
parse_ctx->packet->padlen = (SilcUInt8)header[4];
parse_ctx->packet->sequence = sequence++;
- parse_ctx->sock = silc_socket_dup(sock);
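+  /* No per-packet reference is taken anymore; the socket is kept
+     alive by the reference held over the processing. */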
+ parse_ctx->sock = sock;
parse_ctx->context = parser_context;
/* Check whether this is normal or special packet */
memset(tmp, 0, sizeof(tmp));
silc_packet_context_free(parse_ctx->packet);
silc_free(parse_ctx);
+ silc_socket_free(sock);
return FALSE;
}
}
}
/* Don't clear buffer if pending data is in the buffer */
- if (cont == FALSE && sock->inbuf->len > 0)
+ if (cont == FALSE && sock->inbuf->len > 0) {
+ silc_socket_free(sock);
return TRUE;
+ }
/* Don't clear buffer if QoS data exists in the buffer */
- if (sock->qos && sock->qos->data_len > 0)
+ if (sock->qos && sock->qos->data_len > 0) {
+ silc_socket_free(sock);
return TRUE;
+ }
SILC_LOG_DEBUG(("Clearing inbound buffer"));
silc_buffer_clear(sock->inbuf);
+ silc_socket_free(sock);
return TRUE;
}
SilcUInt32 limit_usec,
SilcSchedule schedule)
{
+ if (!sock)
+ return;
+
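+  /* NULL/zero arguments: reset the QoS from the socket, cancelling
+     any pending QoS tasks. */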
+ if (sock->qos && !read_rate && !read_limit_bytes &&
+ !limit_sec && !limit_usec && !schedule) {
+ silc_schedule_task_del_by_context(sock->qos->schedule, sock->qos);
+ silc_free(sock->qos);
+ sock->qos = NULL;
+ return;
+ }
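+  /* Setting QoS requires the application's scheduler. */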
+ if (!schedule)
+ return;
+
if (!sock->qos) {
sock->qos = silc_calloc(1, sizeof(*sock->qos));
if (!sock->qos)
* will be applied for the reading. The `limit_sec' and `limit_usec'
* specifies the limit that is applied if `read_rate' and/or
* `read_limit_bytes' is reached. The `schedule' is the application's
- * scheduler.
+ * scheduler.  If all arguments except `sock' are NULL or zero, the
+ * QoS is reset from the socket and any QoS that may be pending for
+ * the socket is cancelled.
*
***/
void silc_socket_set_qos(SilcSocketConnection sock,
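As a usage sketch (the rate, byte and delay values below are arbitrary
example values; `schedule' is the application's scheduler):

	/* Apply QoS to the socket with example limits. */
	silc_socket_set_qos(sock, 10, 2048, 0, 500000, schedule);

	/* Reset: NULL/zero arguments remove the QoS from the socket
	   and cancel any pending QoS. */
	silc_socket_set_qos(sock, 0, 0, 0, 0, NULL);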