/*

  silctypes.h

  Author: Pekka Riikonen <priikone@silcnet.org>

  Copyright (C) 2002 - 2007 Pekka Riikonen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; version 2 of the License.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

*/
/****h* silcutil/SILC Types
 *
 * DESCRIPTION
 *
 *    This header file includes basic types and definitions, and various
 *    system specific macros and functions used in SILC Toolkits.
 *    Application programmers may use them when needed.
 *
 ***/

#ifndef SILCTYPES_H
#define SILCTYPES_H
/* The bool macro is deprecated. Use SilcBool instead. */
#define bool unsigned char
#if SILC_SIZEOF_SHORT > 2
#error "size of the short must be 2 bytes"
#endif
/******************************* Public API *********************************/
/****d* silcutil/SILCTypes/SilcBool
 *
 * NAME
 *
 *    typedef unsigned char SilcBool;
 *
 * DESCRIPTION
 *
 *    Boolean value, and is always 8-bits. Represents value 0 or 1.
 *
 ***/
typedef unsigned char SilcBool;
/****d* silcutil/SILCTypes/TRUE
 *
 * NAME
 *
 *    #define TRUE 1
 *
 * DESCRIPTION
 *
 *    Boolean true value indicator.
 *
 ***/
#ifndef TRUE
#define TRUE 1
#endif

/****d* silcutil/SILCTypes/FALSE
 *
 * NAME
 *
 *    #define FALSE 0
 *
 * DESCRIPTION
 *
 *    Boolean false value indicator.
 *
 ***/
#ifndef FALSE
#define FALSE 0
#endif
/****d* silcutil/SILCTypes/SilcUInt8
 *
 * NAME
 *
 *    typedef unsigned char SilcUInt8;
 *
 * DESCRIPTION
 *
 *    8-bit unsigned integer.
 *
 ***/
typedef unsigned char SilcUInt8;

/****d* silcutil/SILCTypes/SilcInt8
 *
 * NAME
 *
 *    typedef signed char SilcInt8;
 *
 * DESCRIPTION
 *
 *    8-bit signed integer.
 *
 ***/
typedef signed char SilcInt8;
/****d* silcutil/SILCTypes/SilcUInt16
 *
 * NAME
 *
 *    typedef unsigned short SilcUInt16;
 *
 * DESCRIPTION
 *
 *    16-bit unsigned integer. Guaranteed to be 16-bits.
 *
 ***/
typedef unsigned short SilcUInt16;

/****d* silcutil/SILCTypes/SilcInt16
 *
 * NAME
 *
 *    typedef signed short SilcInt16;
 *
 * DESCRIPTION
 *
 *    16-bit signed integer. Guaranteed to be 16-bits.
 *
 ***/
typedef signed short SilcInt16;
/****d* silcutil/SILCTypes/SilcUInt32
 *
 * NAME
 *
 *    typedef unsigned long SilcUInt32;
 *
 * DESCRIPTION
 *
 *    32-bit unsigned integer. Guaranteed to be 32-bits.
 *
 ***/
#if SILC_SIZEOF_LONG == 4
typedef unsigned long SilcUInt32;
typedef signed long SilcInt32;
#else
#if SILC_SIZEOF_INT == 4
typedef unsigned int SilcUInt32;
typedef signed int SilcInt32;
#else
#if SILC_SIZEOF_LONG_LONG >= 4
typedef unsigned long long SilcUInt32;
typedef signed long long SilcInt32;
#endif
#endif
#endif
/****d* silcutil/SILCTypes/SilcInt32
 *
 * NAME
 *
 *    typedef signed long SilcInt32;
 *
 * DESCRIPTION
 *
 *    32-bit signed integer. Guaranteed to be 32-bits.
 *
 ***/
/****d* silcutil/SILCTypes/SilcUInt64
 *
 * NAME
 *
 *    typedef unsigned long long SilcUInt64;
 *
 * DESCRIPTION
 *
 *    64-bit unsigned integer. Guaranteed to be 64-bits on systems that
 *    support 64-bit integers; on other systems it is 32 bits wide.
 *
 ***/
#if SILC_SIZEOF_LONG >= 8
typedef unsigned long SilcUInt64;
typedef signed long SilcInt64;
#else
#if SILC_SIZEOF_LONG_LONG >= 8
#ifndef WIN32
typedef unsigned long long SilcUInt64;
typedef signed long long SilcInt64;
#else
typedef unsigned __int64 SilcUInt64;
typedef signed __int64 SilcInt64;
#endif
#else
typedef SilcUInt32 SilcUInt64;
typedef SilcInt32 SilcInt64;
#endif
#endif
/****d* silcutil/SILCTypes/SilcInt64
 *
 * NAME
 *
 *    typedef signed long long SilcInt64;
 *
 * DESCRIPTION
 *
 *    64-bit signed integer. Guaranteed to be 64-bits on systems that
 *    support 64-bit integers; on other systems it is 32 bits wide.
 *
 ***/
/* Fail the build if void * is smaller than 32 bits; the pointer/integer
   conversion macros below depend on it. */
#if SILC_SIZEOF_VOID_P < 4
#error "size of void * must be at least 4 bytes"
#endif
/****d* silcutil/SILCTypes/SilcSocket
 *
 * DESCRIPTION
 *
 *    Platform specific socket. On POSIX compliant systems this is simply
 *    an integer, representing the socket. On other systems it is a
 *    platform specific socket context. Access it only through routines
 *    that can handle SilcSocket types, unless you know what you are doing.
 *
 ***/
#if defined(SILC_UNIX)
typedef int SilcSocket;
#elif defined(SILC_WIN32)
typedef SOCKET SilcSocket;
#elif defined(SILC_SYMBIAN)
typedef void * SilcSocket;
#endif
/****d* silcutil/SILCTypes/SilcParam
 *
 * NAME
 *
 *    typedef SilcUInt32 SilcParam;
 *
 * DESCRIPTION
 *
 *    A generic parameter type that describes the type of a parameter or
 *    argument. It can be used to describe function arguments, buffer
 *    encoding formats and other such lists.
 *
 ***/
typedef SilcUInt32 SilcParam;
#define SILC_PARAM_SINT8         1               /* SilcInt8 */
#define SILC_PARAM_UINT8         2               /* SilcUInt8 */
#define SILC_PARAM_SINT16        3               /* SilcInt16 */
#define SILC_PARAM_UINT16        4               /* SilcUInt16 */
#define SILC_PARAM_SINT32        5               /* SilcInt32 */
#define SILC_PARAM_UINT32        6               /* SilcUInt32 */
#define SILC_PARAM_SINT64        7               /* SilcInt64 */
#define SILC_PARAM_UINT64        8               /* SilcUInt64 */
#define SILC_PARAM_SICHAR        9               /* signed char * */
#define SILC_PARAM_UICHAR        10              /* unsigned char * */
#define SILC_PARAM_BUFFER        11              /* SilcBuffer */
#define SILC_PARAM_PTR           12              /* void * */
#define SILC_PARAM_END           0xfeeefff1      /* End of parameters */

/* Internal parameter types, not publicly documented */
#define SILC_PARAM_UI8_STRING    100             /* String (max len 8-bits) */
#define SILC_PARAM_UI16_STRING   101             /* String (max len 16-bits) */
#define SILC_PARAM_UI32_STRING   102             /* String (max len 32-bits) */
#define SILC_PARAM_UI8_NSTRING   103             /* N-string (max len 8-bits) */
#define SILC_PARAM_UI16_NSTRING  104             /* N-string (max len 16-bits) */
#define SILC_PARAM_UI32_NSTRING  105             /* N-string (max len 32-bits) */
#define SILC_PARAM_OFFSET        106
#define SILC_PARAM_ADVANCE       107
#define SILC_PARAM_FUNC          108
#define SILC_PARAM_ALLOC         0x00010000      /* Allocate, bitmask */
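
/* EXAMPLE (illustrative only, not part of the original header)
 *
 * A minimal sketch of the SILC_PARAM_END termination convention: a
 * variadic consumer reads (type, value) pairs until it sees
 * SILC_PARAM_END. The function example_count_params is hypothetical;
 * in the real Toolkit, SilcParam lists are consumed by the buffer
 * format routines.
 *
 *    #include <stdarg.h>
 *
 *    static int example_count_params(SilcParam first, ...)
 *    {
 *      va_list ap;
 *      SilcParam type = first;
 *      int count = 0;
 *
 *      va_start(ap, first);
 *      while (type != SILC_PARAM_END) {
 *        (void)va_arg(ap, void *);      // skip the value argument
 *        type = va_arg(ap, SilcParam);  // read the next type tag
 *        count++;
 *      }
 *      va_end(ap);
 *      return count;
 *    }
 *
 *    // example_count_params(SILC_PARAM_UINT32, &x, SILC_PARAM_END) == 1
 */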
#if (defined(SILC_I486) || defined(SILC_X86_64)) && defined(__GNUC__)
#define SILC_GET_WORD(cp)				\
({							\
  SilcUInt32 _result_;					\
  asm volatile ("movl (%1), %0; bswapl %0"		\
		: "=q" (_result_) : "q" (cp));		\
  _result_;						\
})
#else
#define SILC_GET_WORD(cp) (((SilcUInt32)(SilcUInt8)(cp)[0] << 24)	\
		    | ((SilcUInt32)(SilcUInt8)(cp)[1] << 16)		\
		    | ((SilcUInt32)(SilcUInt8)(cp)[2] << 8)		\
		    | ((SilcUInt32)(SilcUInt8)(cp)[3]))
#endif /* (SILC_I486 || SILC_X86_64) && __GNUC__ */
/****d* silcutil/SILCTypes/SILC_GET16_MSB
 *
 * NAME
 *
 *    #define SILC_GET16_MSB(dest, src)
 *
 * DESCRIPTION
 *
 *    Return two 8-bit bytes, most significant byte first.
 *
 ***/
#if (defined(SILC_I386) || defined(SILC_X86_64)) && defined(__GNUC__)
#define SILC_GET16_MSB(l, cp)				\
asm volatile ("movw (%1), %w0; rolw $8, %w0"		\
	      : "=q" (l) : "q" (cp) : "memory", "cc");
#else
#define SILC_GET16_MSB(l, cp)				\
do {							\
  (l) = ((SilcUInt32)(SilcUInt8)(cp)[0] << 8)		\
    | ((SilcUInt32)(SilcUInt8)(cp)[1]);			\
} while(0)
#endif /* (SILC_I386 || SILC_X86_64) && __GNUC__ */
/****d* silcutil/SILCTypes/SILC_GET32_MSB
 *
 * NAME
 *
 *    #define SILC_GET32_MSB(dest, src)
 *
 * DESCRIPTION
 *
 *    Return four 8-bit bytes, most significant byte first.
 *
 ***/
#if (defined(SILC_I486) || defined(SILC_X86_64)) && defined(__GNUC__)
#define SILC_GET32_MSB(l, cp)				\
asm volatile ("movl (%1), %0; bswapl %0"		\
	      : "=q" (l) : "q" (cp) : "memory", "cc");
#else
#define SILC_GET32_MSB(l, cp)				\
do {							\
  (l) = ((SilcUInt32)(SilcUInt8)(cp)[0] << 24)		\
    | ((SilcUInt32)(SilcUInt8)(cp)[1] << 16)		\
    | ((SilcUInt32)(SilcUInt8)(cp)[2] << 8)		\
    | ((SilcUInt32)(SilcUInt8)(cp)[3]);			\
} while(0)
#endif /* (SILC_I486 || SILC_X86_64) && __GNUC__ */
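
/* EXAMPLE (illustrative only, not part of the original header)
 *
 * Decoding a 32-bit big endian (network byte order) integer from a
 * byte buffer:
 *
 *    unsigned char buf[4] = { 0x00, 0x00, 0x01, 0x02 };
 *    SilcUInt32 len;
 *
 *    SILC_GET32_MSB(len, buf);   // len == 0x00000102 == 258
 */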
/* Same as above, but XORs the result into the destination. Special
   purpose macro. */
#if (defined(SILC_I486) || defined(SILC_X86_64)) && defined(__GNUC__)
#define SILC_GET32_X_MSB(l, cp)					\
do {								\
  register volatile SilcUInt32 _x_;				\
  asm volatile ("movl %1, %3; movl (%2), %0;\n\t"		\
		"bswapl %0; xorl %3, %0"			\
		: "=r" (l) : "0" (l), "r" (cp), "r" (_x_)	\
		: "memory", "cc");				\
} while(0)
#else
#define SILC_GET32_X_MSB(l, cp)				\
  (l) ^= ((SilcUInt32)(SilcUInt8)(cp)[0] << 24)		\
    | ((SilcUInt32)(SilcUInt8)(cp)[1] << 16)		\
    | ((SilcUInt32)(SilcUInt8)(cp)[2] << 8)		\
    | ((SilcUInt32)(SilcUInt8)(cp)[3]);
#endif /* (SILC_I486 || SILC_X86_64) && __GNUC__ */
/****d* silcutil/SILCTypes/SILC_GET64_MSB
 *
 * NAME
 *
 *    #define SILC_GET64_MSB(dest, src)
 *
 * DESCRIPTION
 *
 *    Return eight 8-bit bytes, most significant byte first.
 *
 ***/
#if defined(SILC_X86_64) && defined(__GNUC__)
#define SILC_GET64_MSB(l, cp)				\
asm volatile ("movq (%1), %0; bswapq %0"		\
	      : "=r" (l) : "r" (cp) : "memory", "cc");
#else
#define SILC_GET64_MSB(l, cp)					\
do {								\
  (l) = ((((SilcUInt64)SILC_GET_WORD((cp))) << 32) |		\
	 ((SilcUInt64)SILC_GET_WORD((cp) + 4)));		\
} while(0)
#endif /* SILC_X86_64 && __GNUC__ */
/****d* silcutil/SILCTypes/SILC_GET16_LSB
 *
 * NAME
 *
 *    #define SILC_GET16_LSB(dest, src)
 *
 * DESCRIPTION
 *
 *    Return two 8-bit bytes, least significant byte first.
 *
 ***/
#if defined(SILC_I386) || defined(SILC_X86_64)
#define SILC_GET16_LSB(l, cp) (l) = (*(SilcUInt16 *)(cp))
#else
#define SILC_GET16_LSB(l, cp)				\
do {							\
  (l) = ((SilcUInt32)(SilcUInt8)(cp)[0])		\
    | ((SilcUInt32)(SilcUInt8)(cp)[1] << 8);		\
} while(0)
#endif /* SILC_I386 || SILC_X86_64 */
/****d* silcutil/SILCTypes/SILC_GET32_LSB
 *
 * NAME
 *
 *    #define SILC_GET32_LSB(dest, src)
 *
 * DESCRIPTION
 *
 *    Return four 8-bit bytes, least significant byte first.
 *
 ***/
#if defined(SILC_I386) || defined(SILC_X86_64)
#define SILC_GET32_LSB(l, cp) (l) = (*(SilcUInt32 *)(cp))
#else
#define SILC_GET32_LSB(l, cp)				\
do {							\
  (l) = ((SilcUInt32)(SilcUInt8)(cp)[0])		\
    | ((SilcUInt32)(SilcUInt8)(cp)[1] << 8)		\
    | ((SilcUInt32)(SilcUInt8)(cp)[2] << 16)		\
    | ((SilcUInt32)(SilcUInt8)(cp)[3] << 24);		\
} while(0)
#endif /* SILC_I386 || SILC_X86_64 */
/* Same as above, but XORs the result into the destination. Special
   purpose macro. */
#if defined(SILC_I386) || defined(SILC_X86_64)
#define SILC_GET32_X_LSB(l, cp) (l) ^= (*(SilcUInt32 *)(cp))
#else
#define SILC_GET32_X_LSB(l, cp)				\
  (l) ^= ((SilcUInt32)(SilcUInt8)(cp)[0])		\
    | ((SilcUInt32)(SilcUInt8)(cp)[1] << 8)		\
    | ((SilcUInt32)(SilcUInt8)(cp)[2] << 16)		\
    | ((SilcUInt32)(SilcUInt8)(cp)[3] << 24)
#endif /* SILC_I386 || SILC_X86_64 */
/****d* silcutil/SILCTypes/SILC_PUT16_MSB
 *
 * NAME
 *
 *    #define SILC_PUT16_MSB(dest, src)
 *
 * DESCRIPTION
 *
 *    Put two 8-bit bytes, most significant byte first.
 *
 ***/
#if (defined(SILC_I386) || defined(SILC_X86_64)) && defined(__GNUC__)
#define SILC_PUT16_MSB(l, cp)				\
asm volatile ("rolw $8, %w1; movw %w1, (%0)"		\
	      : : "q" (cp), "q" (l) : "memory", "cc");
#else
#define SILC_PUT16_MSB(l, cp)			\
do {						\
  (cp)[0] = (SilcUInt8)((l) >> 8);		\
  (cp)[1] = (SilcUInt8)(l);			\
} while(0)
#endif /* (SILC_I386 || SILC_X86_64) && __GNUC__ */
/****d* silcutil/SILCTypes/SILC_PUT32_MSB
 *
 * NAME
 *
 *    #define SILC_PUT32_MSB(dest, src)
 *
 * DESCRIPTION
 *
 *    Put four 8-bit bytes, most significant byte first.
 *
 ***/
#if (defined(SILC_I486) || defined(SILC_X86_64)) && defined(__GNUC__)
#define SILC_PUT32_MSB(l, cp)				\
asm volatile ("bswapl %1; movl %1, (%0); bswapl %1"	\
	      : : "q" (cp), "q" (l) : "memory", "cc");
#else
#define SILC_PUT32_MSB(l, cp)			\
do {						\
  (cp)[0] = (SilcUInt8)((l) >> 24);		\
  (cp)[1] = (SilcUInt8)((l) >> 16);		\
  (cp)[2] = (SilcUInt8)((l) >> 8);		\
  (cp)[3] = (SilcUInt8)(l);			\
} while(0)
#endif /* (SILC_I486 || SILC_X86_64) && __GNUC__ */
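
/* EXAMPLE (illustrative only, not part of the original header)
 *
 * Encoding a 32-bit integer to a byte buffer in big endian (network
 * byte order), the usual way to write a packet length field:
 *
 *    unsigned char buf[4];
 *
 *    SILC_PUT32_MSB(258, buf);   // buf is now { 0x00, 0x00, 0x01, 0x02 }
 */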
/****d* silcutil/SILCTypes/SILC_PUT64_MSB
 *
 * NAME
 *
 *    #define SILC_PUT64_MSB(dest, src)
 *
 * DESCRIPTION
 *
 *    Put eight 8-bit bytes, most significant byte first.
 *
 ***/
#if defined(SILC_X86_64) && defined(__GNUC__)
#define SILC_PUT64_MSB(l, cp)				\
asm volatile ("bswapq %1; movq %1, (%0); bswapq %1"	\
	      : : "r" (cp), "r" (l) : "memory", "cc");
#else
#define SILC_PUT64_MSB(l, cp)					\
do {								\
  SILC_PUT32_MSB((SilcUInt32)((SilcUInt64)(l) >> 32), (cp));	\
  SILC_PUT32_MSB((SilcUInt32)(l), (cp) + 4);			\
} while(0)
#endif /* SILC_X86_64 && __GNUC__ */
/****d* silcutil/SILCTypes/SILC_PUT16_LSB
 *
 * NAME
 *
 *    #define SILC_PUT16_LSB(dest, src)
 *
 * DESCRIPTION
 *
 *    Put two 8-bit bytes, least significant byte first.
 *
 ***/
#if defined(SILC_I386) || defined(SILC_X86_64)
#define SILC_PUT16_LSB(l, cp) (*(SilcUInt16 *)(cp)) = (l)
#else
#define SILC_PUT16_LSB(l, cp)			\
do {						\
  (cp)[0] = (SilcUInt8)(l);			\
  (cp)[1] = (SilcUInt8)((l) >> 8);		\
} while(0)
#endif /* SILC_I386 || SILC_X86_64 */
/****d* silcutil/SILCTypes/SILC_PUT32_LSB
 *
 * NAME
 *
 *    #define SILC_PUT32_LSB(dest, src)
 *
 * DESCRIPTION
 *
 *    Put four 8-bit bytes, least significant byte first.
 *
 ***/
#if defined(SILC_I386) || defined(SILC_X86_64)
#define SILC_PUT32_LSB(l, cp) (*(SilcUInt32 *)(cp)) = (l)
#else
#define SILC_PUT32_LSB(l, cp)			\
do {						\
  (cp)[0] = (SilcUInt8)(l);			\
  (cp)[1] = (SilcUInt8)((l) >> 8);		\
  (cp)[2] = (SilcUInt8)((l) >> 16);		\
  (cp)[3] = (SilcUInt8)((l) >> 24);		\
} while(0)
#endif /* SILC_I386 || SILC_X86_64 */
/****d* silcutil/SILCTypes/SILC_SWAB_16
 *
 * NAME
 *
 *    #define SILC_SWAB_16(integer)
 *
 * DESCRIPTION
 *
 *    Swabs 16-bit unsigned integer byte order. Returns the new value.
 *
 ***/
#if (defined(SILC_I386) || defined(SILC_X86_64)) && defined(__GNUC__)
#define SILC_SWAB_16(l)				\
({						\
  SilcUInt16 _result_;				\
  asm volatile ("movw %w1, %w0; rolw $8, %w0"	\
		: "=q" (_result_) : "q" (l));	\
  _result_;					\
})
#else
#define SILC_SWAB_16(l)						\
  ((SilcUInt16)(((SilcUInt16)(l) & (SilcUInt16)0x00FFU) << 8) |	\
	       (((SilcUInt16)(l) & (SilcUInt16)0xFF00U) >> 8))
#endif /* (SILC_I386 || SILC_X86_64) && __GNUC__ */
/****d* silcutil/SILCTypes/SILC_SWAB_32
 *
 * NAME
 *
 *    #define SILC_SWAB_32(integer)
 *
 * DESCRIPTION
 *
 *    Swabs 32-bit unsigned integer byte order. Returns the new value.
 *
 ***/
#if (defined(SILC_I486) || defined(SILC_X86_64)) && defined(__GNUC__)
#define SILC_SWAB_32(l)				\
({						\
  SilcUInt32 _result_;				\
  asm volatile ("movl %1, %0; bswapl %0"	\
		: "=q" (_result_) : "q" (l));	\
  _result_;					\
})
#else
#define SILC_SWAB_32(l)							\
  ((SilcUInt32)(((SilcUInt32)(l) & (SilcUInt32)0x000000FFUL) << 24) |	\
	       (((SilcUInt32)(l) & (SilcUInt32)0x0000FF00UL) << 8)  |	\
	       (((SilcUInt32)(l) & (SilcUInt32)0x00FF0000UL) >> 8)  |	\
	       (((SilcUInt32)(l) & (SilcUInt32)0xFF000000UL) >> 24))
#endif /* (SILC_I486 || SILC_X86_64) && __GNUC__ */
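
/* EXAMPLE (illustrative only, not part of the original header)
 *
 *    SilcUInt32 le = 0x78563412;
 *    SilcUInt32 be = SILC_SWAB_32(le);   // be == 0x12345678
 */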
/****d* silcutil/SILCTypes/SILC_PTR_TO_32
 *
 * NAME
 *
 *    #define SILC_PTR_TO_32(ptr)
 *
 * DESCRIPTION
 *
 *    Type casts a pointer's value into a 32-bit integer. Use this to
 *    avoid compiler warnings when type casting pointers to integers
 *    of different size.
 *
 ***/
#if SILC_SIZEOF_VOID_P < 8
#define SILC_PTR_TO_32(_ptr__) ((SilcUInt32)(_ptr__))
#else
#define SILC_PTR_TO_32(_ptr__)						\
  ((SilcUInt32)((SilcUInt64)(_ptr__) & (SilcUInt32)0xFFFFFFFFUL))
#endif
/****d* silcutil/SILCTypes/SILC_PTR_TO_64
 *
 * NAME
 *
 *    #define SILC_PTR_TO_64(ptr)
 *
 * DESCRIPTION
 *
 *    Type casts a pointer's value into a 64-bit integer. Use this to
 *    avoid compiler warnings when type casting pointers to integers
 *    of different size.
 *
 ***/
#if SILC_SIZEOF_VOID_P < 8
#define SILC_PTR_TO_64(_ptr__) ((SilcUInt64)((SilcUInt32)(_ptr__)))
#else
#define SILC_PTR_TO_64(_ptr__) ((SilcUInt64)(_ptr__))
#endif
/****d* silcutil/SILCTypes/SILC_32_TO_PTR
 *
 * NAME
 *
 *    #define SILC_32_TO_PTR(ival)
 *
 * DESCRIPTION
 *
 *    Type casts a 32-bit integer value into a pointer. Use this to
 *    avoid compiler warnings when type casting integers to pointers
 *    of different size.
 *
 ***/
#if SILC_SIZEOF_VOID_P < 8
#define SILC_32_TO_PTR(_ival__) ((void *)((SilcUInt32)(_ival__)))
#else
#define SILC_32_TO_PTR(_ival__) ((void *)((SilcUInt64)(_ival__)))
#endif
/****d* silcutil/SILCTypes/SILC_64_TO_PTR
 *
 * NAME
 *
 *    #define SILC_64_TO_PTR(ival)
 *
 * DESCRIPTION
 *
 *    Type casts a 64-bit integer value into a pointer. Use this to
 *    avoid compiler warnings when type casting integers to pointers
 *    of different size.
 *
 ***/
#if SILC_SIZEOF_VOID_P < 8
#define SILC_64_TO_PTR(_ival__)						\
  ((void *)((SilcUInt32)((SilcUInt64)(_ival__) & (SilcUInt32)0xFFFFFFFFUL)))
#else
#define SILC_64_TO_PTR(_ival__) ((void *)((SilcUInt64)(_ival__)))
#endif
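
/* EXAMPLE (illustrative only, not part of the original header)
 *
 * Carrying a small integer through a void * context pointer, for
 * example to a callback; the callback and its context argument here
 * are hypothetical:
 *
 *    void example_callback(void *context)
 *    {
 *      SilcUInt32 id = SILC_PTR_TO_32(context);
 *      (void)id;
 *    }
 *
 *    // caller side
 *    example_callback(SILC_32_TO_PTR(42));
 */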
/****d* silcutil/SILCTypes/silc_rol
 *
 * NAME
 *
 *    static inline SilcUInt32 silc_rol(SilcUInt32 val, int num);
 *
 * DESCRIPTION
 *
 *    Rotate 32-bit integer's bits to left `num' times. Bits pushed to the
 *    left will appear from the right side of the integer, thus rotating.
 *    Returns the rotated value.
 *
 ***/
static inline SilcUInt32 silc_rol(SilcUInt32 val, int num)
{
#if (defined(SILC_I386) || defined(SILC_X86_64)) && defined(__GNUC__)
  asm volatile ("roll %%cl, %0"
		: "=q" (val) : "0" (val), "c" (num));
  return val;
#else
  return ((val << (SilcUInt32)num) | (val >> (32 - (SilcUInt32)num)));
#endif /* (SILC_I386 || SILC_X86_64) && __GNUC__ */
}
/****d* silcutil/SILCTypes/silc_ror
 *
 * NAME
 *
 *    static inline SilcUInt32 silc_ror(SilcUInt32 val, int num);
 *
 * DESCRIPTION
 *
 *    Rotate 32-bit integer's bits to right `num' times. Bits pushed to the
 *    right will appear from the left side of the integer, thus rotating.
 *    Returns the rotated value.
 *
 ***/
static inline SilcUInt32 silc_ror(SilcUInt32 val, int num)
{
#if (defined(SILC_I386) || defined(SILC_X86_64)) && defined(__GNUC__)
  asm volatile ("rorl %%cl, %0"
		: "=q" (val) : "0" (val), "c" (num));
  return val;
#else
  return ((val >> (SilcUInt32)num) | (val << (32 - (SilcUInt32)num)));
#endif /* (SILC_I386 || SILC_X86_64) && __GNUC__ */
}
/****d* silcutil/SILCTypes/silc_rol64
 *
 * NAME
 *
 *    static inline SilcUInt64 silc_rol64(SilcUInt64 val, int num);
 *
 * DESCRIPTION
 *
 *    Rotate 64-bit integer's bits to left `num' times. Bits pushed to the
 *    left will appear from the right side of the integer, thus rotating.
 *    Returns the rotated value.
 *
 ***/
static inline SilcUInt64 silc_rol64(SilcUInt64 val, int num)
{
#if defined(SILC_X86_64) && defined(__GNUC__)
  asm volatile ("rolq %%cl, %0"
		: "=q" (val) : "0" (val), "c" (num));
  return val;
#else
  return ((val << (SilcUInt64)num) | (val >> (64 - (SilcUInt64)num)));
#endif /* SILC_X86_64 && __GNUC__ */
}
/****d* silcutil/SILCTypes/silc_ror64
 *
 * NAME
 *
 *    static inline SilcUInt64 silc_ror64(SilcUInt64 val, int num);
 *
 * DESCRIPTION
 *
 *    Rotate 64-bit integer's bits to right `num' times. Bits pushed to the
 *    right will appear from the left side of the integer, thus rotating.
 *    Returns the rotated value.
 *
 ***/
static inline SilcUInt64 silc_ror64(SilcUInt64 val, int num)
{
#if defined(SILC_X86_64) && defined(__GNUC__)
  asm volatile ("rorq %%cl, %0"
		: "=q" (val) : "0" (val), "c" (num));
  return val;
#else
  return ((val >> (SilcUInt64)num) | (val << (64 - (SilcUInt64)num)));
#endif /* SILC_X86_64 && __GNUC__ */
}
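
/* EXAMPLE (illustrative only, not part of the original header)
 *
 * Rotations are typically used in cipher and hash round functions:
 *
 *    SilcUInt32 v = 0x80000001;
 *
 *    v = silc_rol(v, 1);   // v == 0x00000003
 *    v = silc_ror(v, 1);   // v == 0x80000001 again
 */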
/****d* silcutil/SILCTypes/silc_offsetof
 *
 * NAME
 *
 *    #define silc_offsetof(TYPE, MEMBER)
 *
 * DESCRIPTION
 *
 *    offsetof() macro replacement. Use this instead of offsetof().
 *
 ***/
#define silc_offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
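
/* EXAMPLE (illustrative only, not part of the original header)
 *
 * Recovering the containing structure from a pointer to one of its
 * members; the SilcExample type is hypothetical:
 *
 *    typedef struct { int a; int b; } SilcExample;
 *
 *    size_t off = silc_offsetof(SilcExample, b);
 *    // given `int *bp' pointing at some SilcExample's `b' member:
 *    // SilcExample *e = (SilcExample *)((char *)bp - off);
 */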
/****d* silcutil/SILCTypes/silc_attribute
 *
 * NAME
 *
 *    #define silc_attribute(attrlist)
 *
 * DESCRIPTION
 *
 *    Compiler attributes. If the compiler doesn't support attributes this
 *    macro does nothing. Currently this works only with the GCC compiler.
 *    See GCC documentation for the specified attributes.
 *
 * EXAMPLE
 *
 *    int printf(const char *fmt, ...) silc_attribute((format(printf, 1, 2)));
 *
 ***/
#if defined(__GNUC__)
#define silc_attribute(attrlist) __attribute__(attrlist)
#else
#define silc_attribute(attrlist)
#endif /* __GNUC__ */
/****d* silcutil/SILCTypes/silc_likely
 *
 * NAME
 *
 *    #define silc_likely(expression)
 *
 * DESCRIPTION
 *
 *    Branch prediction macro. It specifies that it is likely that the branch
 *    where silc_likely is applied is taken. The compiler will optimize the
 *    code based on this prediction. Never use this before you have profiled
 *    the code first.
 *
 ***/

/****d* silcutil/SILCTypes/silc_unlikely
 *
 * NAME
 *
 *    #define silc_unlikely(expression)
 *
 * DESCRIPTION
 *
 *    Branch prediction macro. It specifies that it is unlikely that the
 *    branch where silc_unlikely is applied is taken. The compiler will
 *    optimize the code based on this prediction. Never use this before
 *    you have profiled the code first.
 *
 ***/
#if __GNUC__ >= 3
#define silc_likely(expr) __builtin_expect(!!(expr), 1)
#define silc_unlikely(expr) __builtin_expect(!!(expr), 0)
#else
#define silc_likely(expr) (expr)
#define silc_unlikely(expr) (expr)
#endif /* __GNUC__ >= 3 */
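
/* EXAMPLE (illustrative only, not part of the original header)
 *
 *    if (silc_unlikely(buffer == NULL))
 *      return FALSE;   // rare error path, kept off the hot path
 */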
/* Prefetch operations. Use these to prefetch data to CPU cache before
   reading or writing if you think that the data will be needed soon
   after the prefetch. */

/****d* silcutil/SILCTypes/silc_prefetch
 *
 * NAME
 *
 *    static inline void silc_prefetch(void *addr, int rw, int locality);
 *
 * DESCRIPTION
 *
 *    Simple prefetch. Loads memory from the specified address to CPU cache.
 *    The amount of data loaded is CPU dependent (cache line length). The
 *    `rw' argument defines the reason for prefetch: 0=read, 1=write. The
 *    `locality' argument defines the locality of the prefetch, 0=non-temporal
 *    (non-temporal cache, cache closest to CPU, data will not stay long in
 *    the cache), 1=temporal (L2+ cache), 2=temporal (L2, L3+ cache),
 *    3=temporal (fetch to all caches, data stays longer time in cache).
 *
 * NOTES
 *
 *    This produces only a hint for the CPU. The CPU doesn't have to
 *    actually prefetch the data. Use silc_prefetch_block to ensure the
 *    CPU always prefetches.
 *
 ***/
static inline silc_attribute((always_inline))
void silc_prefetch(void *addr, int rw, int locality)
{
#ifdef __GNUC__
  __builtin_prefetch(addr, rw, locality);
#endif /* __GNUC__ */
}
/****d* silcutil/SILCTypes/silc_prefetch_block
 *
 * NAME
 *
 *    static inline void silc_prefetch_block(void *addr,
 *                                           int prefetch_length,
 *                                           const int cache_line_length)
 *
 * DESCRIPTION
 *
 *    Enforced block prefetch. This function loads the specified amount
 *    `prefetch_length' of memory from the specified address `addr' to CPU
 *    cache with each loaded cache line being the size of `cache_line_length'.
 *    If you don't know the cache line size use 64 bytes. Note that the
 *    `cache_line_length' is a const int. In this context this means its
 *    value must not come from a variable but must be a constant (the code
 *    won't compile if it comes from a variable).
 *
 *    The `prefetch_length' must be a multiple of twice the
 *    `cache_line_length', or of 128 if you don't know the cache line size,
 *    hence the minimum length for `prefetch_length' is 128 bytes when the
 *    `cache_line_length' is 64 bytes. A shorter cache line length (32 bytes)
 *    can be used as well.
 *
 *    You should use the correct `cache_line_length' value for your CPU or
 *    the value of the CPU for which you want to optimize your code. Intel
 *    CPUs usually have a cache line size of 32 or 64 bytes. The most optimal
 *    prefetch is achieved if the `cache_line_length' is the actual CPU cache
 *    line size. Always do performance testing with and without prefetching
 *    to make sure the prefetch actually helps. If used improperly, it may
 *    slow down your program.
 *
 * NOTES
 *
 *    The difference to silc_prefetch is that this function always performs
 *    the prefetch and has the ability to prefetch more than one cache line
 *    worth of memory, whereas silc_prefetch can prefetch only one cache line
 *    and may not do the prefetch at all.
 *
 ***/
static inline silc_attribute((always_inline))
void silc_prefetch_block(void *addr,
			 int prefetch_length,
			 const int cache_line_length)
{
  SILC_ASSERT(cache_line_length >= 32);
  SILC_ASSERT(cache_line_length % 32 == 0);
  SILC_ASSERT(prefetch_length >= cache_line_length);
  SILC_ASSERT(prefetch_length % (cache_line_length * 2) == 0);
#if SILC_SIZEOF_VOID_P < 8
#define SILC_PREFETCH_UINT SilcUInt32
#else
#define SILC_PREFETCH_UINT SilcUInt64
#endif /* SILC_SIZEOF_VOID_P < 8 */
#if defined(__GNUC__) && (defined(SILC_I386) || defined(SILC_X86_64))

  /* Assembler implementation.

     The idea here is to simply enforce the CPU to load the requested amount
     of bytes to cache. We simply mov data from the memory to a register.
     Each mov will load a full cache line worth of data from the memory.

     We expect the `cache_line_length' to be the actual cache line size.
     It doesn't matter if it isn't. If it is smaller the prefetch is a bit
     slower as there is redundancy. If it is larger we skip some of the
     data and don't prefetch everything.

     The loop is unrolled to handle two mov's at once, which is why we expect
     the `prefetch_length' to be a multiple of twice the length of
     `cache_line_length'. We also mov the data from the end towards the
     beginning instead of from the beginning to assure the CPU doesn't
     prefetch the data before we actually want to do it.

     This technique is described by AMD in:
     http://cdrom.amd.com/devconn/events/AMD_block_prefetch_paper.pdf */

  {
    SILC_PREFETCH_UINT temp;

#define SILC_PREFETCH_ASM(ip, rp)					\
    asm volatile ("1:					\n\t"		\
		  "mov" ip " -%c4(%2, %" rp "3), %0	\n\t"		\
		  "mov" ip " -%c5(%2, %" rp "3), %0	\n\t"		\
		  "sub" ip " %5, %" rp "3		\n\t"		\
		  "jnz 1b"						\
		  : "=&r" (temp), "=r" (prefetch_length)		\
		  : "r" (addr), "1" (prefetch_length),			\
		    "Z" (cache_line_length),				\
		    "Z" (cache_line_length * 2)				\
		  : "memory", "cc");

#if defined(SILC_I386)
    /* 32-bit prefetch */
    SILC_PREFETCH_ASM("l", "");
#else
    /* 64-bit prefetch */
    SILC_PREFETCH_ASM("q", "q");
#endif /* SILC_I386 */
  }

#else
  /* C implementation. Yes, you can do it in C too. In fact, we try to
     make the compiler generate nearly identical code to the above assembler
     code. Note that the memory accesses must be volatile, otherwise the
     compiler will optimize them away because the temp variable isn't
     actually used for anything. This should be as fast as the assembler
     code above, unless the compiler decides to start meddling with it
     (don't use -funroll-loops with this code). */

  {
    register unsigned char *a = addr;
    register int len = prefetch_length;
    register SILC_PREFETCH_UINT temp;

    do {
      temp = *(SILC_PREFETCH_UINT volatile *)
	(a + (len - cache_line_length));
      temp = *(SILC_PREFETCH_UINT volatile *)
	(a + (len - (cache_line_length * 2)));
      len -= (cache_line_length * 2);
    } while (len > 0);
    (void)temp;
  }
#endif /* __GNUC__ */
#undef SILC_PREFETCH_UINT
#undef SILC_PREFETCH_ASM
}
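
/* EXAMPLE (illustrative only, not part of the original header)
 *
 * Prefetching a 4096-byte block before processing it, assuming a
 * 64-byte cache line (note 4096 is a multiple of 2 * 64):
 *
 *    unsigned char data[4096];
 *
 *    silc_prefetch_block(data, sizeof(data), 64);
 *    // ... now read/write `data' ...
 */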

#endif /* SILCTYPES_H */