+/****f* silcutil/SilcAtomicAPI/silc_atomic_add_int32
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * SilcUInt32 silc_atomic_add_int32(SilcAtomic32 *atomic, SilcInt32 value);
+ *
+ * DESCRIPTION
+ *
+ * Atomically adds `value' to 32-bit integer. Returns the value after
+ * addition.
+ *
+ ***/
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_add_int16
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * SilcUInt16 silc_atomic_add_int16(SilcAtomic16 *atomic, SilcInt16 value);
+ *
+ * DESCRIPTION
+ *
+ * Atomically adds `value' to 16-bit integer. Returns the value after
+ * addition.
+ *
+ ***/
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_add_int8
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * SilcUInt8 silc_atomic_add_int8(SilcAtomic8 *atomic, SilcInt8 value);
+ *
+ * DESCRIPTION
+ *
+ * Atomically adds `value' to 8-bit integer. Returns the value after
+ * addition.
+ *
+ ***/
+
+/* Expands to the common function header shared by every
+   silc_atomic_add_int<bits> implementation selected below. */
+#define SILC_ATOMIC_ADD_INT_F(bits) \
+static inline \
+SilcUInt##bits silc_atomic_add_int##bits(SilcAtomic##bits *atomic, \
+ SilcInt##bits value)
+
+#if !defined(SILC_THREADS)
+/* Single-threaded build: a plain read-modify-write is sufficient. */
+#define SILC_ATOMIC_ADD_INT(bits, bp) \
+SILC_ATOMIC_ADD_INT_F(bits) \
+{ \
+  SilcUInt##bits ret; \
+  /* No atomic operations */ \
+  ret = atomic->value; \
+  atomic->value += value; \
+  return ret + value; \
+}
+
+#elif defined(SILC_WIN32)
+/* NOTE(review): InterlockedExchangeAdd operates on a LONG *; for the 8- and
+   16-bit instantiations &atomic->value is a narrower pointer -- verify this
+   compiles and is width-correct on Win32. */
+#define SILC_ATOMIC_ADD_INT(bits, bp) \
+SILC_ATOMIC_ADD_INT_F(bits) \
+{ \
+  SilcUInt##bits ret; \
+  LONG val = value; \
+  /* Windows */ \
+  ret = InterlockedExchangeAdd(&atomic->value, val); \
+  return ret + value; \
+}
+
+#elif defined(__GNUC__) && (defined(SILC_I486) || defined(SILC_X86_64))
+/* XADD returns the pre-add value in the register operand, so the
+   post-add value is reconstructed with "ret + value". */
+#define SILC_ATOMIC_ADD_INT(bits, bp) \
+SILC_ATOMIC_ADD_INT_F(bits) \
+{ \
+  SilcUInt##bits ret; \
+  /* GCC + i486 or x86_64 */ \
+  __asm __volatile(SILC_SMP_LOCK "xadd" bp " %0, %1" \
+		   : "=r" (ret), "+m" (atomic->value) \
+		   : "0" (value)); \
+  return ret + value; \
+}
+
+#elif defined(__GNUC__) && defined(SILC_IA64)
+#define SILC_ATOMIC_ADD_INT(bits, bp) \
+SILC_ATOMIC_ADD_INT_F(bits) \
+{ \
+  SilcUInt##bits ret; \
+  SilcInt32 val = value; \
+  /* GCC + IA64 (GCC builtin atomic operations) */ \
+  ret = __sync_fetch_and_add(&atomic->value, val); \
+  return ret + value; \
+}
+
+#elif defined(__GNUC__) && defined(SILC_POWERPC)
+/* NOTE(review): lwarx/stwcx. operate on aligned 32-bit words; the 8- and
+   16-bit instantiations dereference a narrower value field -- confirm the
+   layout of SilcAtomic8/16 makes this safe. */
+#define SILC_ATOMIC_ADD_INT(bits, bp) \
+SILC_ATOMIC_ADD_INT_F(bits) \
+{ \
+  SilcUInt32 ret; \
+  SilcInt32 val = value; \
+  /* GCC + PowerPC (code adapted from IBM's documentation) */ \
+  __asm __volatile("0: lwarx %0, 0, %2\n" \
+		   " add %0, %1, %0\n" \
+		   " stwcx. %0, 0, %2\n" \
+		   " bne- 0b" \
+		   : "=&r" (ret) \
+		   : "r" (val), "r" (&atomic->value) \
+		   : "cc"); \
+  /* ret already holds the post-add value here. */ \
+  return ret; \
+}
+
+#else /* SILC_ATOMIC_MUTEX */
+#define SILC_ATOMIC_ADD_INT(bits, bp) \
+SILC_ATOMIC_ADD_INT_F(bits) \
+{ \
+  SilcUInt##bits ret; \
+  /* Mutex */ \
+  silc_mutex_lock(atomic->lock); \
+  ret = atomic->value; \
+  atomic->value += value; \
+  silc_mutex_unlock(atomic->lock); \
+  return ret + value; \
+}
+#endif /* !SILC_THREADS */
+
+/* Emit the 8-, 16- and 32-bit adders; the second argument is the x86
+   operand-size suffix used by the inline-assembler variants. */
+SILC_ATOMIC_ADD_INT(8, "b")
+SILC_ATOMIC_ADD_INT(16, "w")
+SILC_ATOMIC_ADD_INT(32, "l")
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_sub_int32
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * SilcUInt32 silc_atomic_sub_int32(SilcAtomic32 *atomic, SilcInt32 value);
+ *
+ * DESCRIPTION
+ *
+ * Atomically subtracts `value' from 32-bit integer. Returns the value
+ * after subtraction.
+ *
+ ***/
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_sub_int16
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * SilcUInt16 silc_atomic_sub_int16(SilcAtomic16 *atomic, SilcInt16 value);
+ *
+ * DESCRIPTION
+ *
+ * Atomically subtracts `value' from 16-bit integer. Returns the value
+ * after subtraction.
+ *
+ ***/
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_sub_int8
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * SilcUInt8 silc_atomic_sub_int8(SilcAtomic8 *atomic, SilcInt8 value);
+ *
+ * DESCRIPTION
+ *
+ * Atomically subtracts `value' from 8-bit integer. Returns the value
+ * after subtraction.
+ *
+ ***/
+
+/* Subtraction implemented as addition of the negated value.  The argument
+   must be fully parenthesized before negation: with the previous (-v),
+   silc_atomic_sub_int32(a, x - y) expanded to (-x - y), negating only the
+   first term of the expression. */
+#define silc_atomic_sub_int8(a, v) silc_atomic_add_int8(a, (-(v)))
+#define silc_atomic_sub_int16(a, v) silc_atomic_add_int16(a, (-(v)))
+#define silc_atomic_sub_int32(a, v) silc_atomic_add_int32(a, (-(v)))
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_inc32
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * void silc_atomic_inc32(SilcAtomic32 *atomic);
+ *
+ * DESCRIPTION
+ *
+ * Atomically increments 32-bit integer by one.
+ *
+ ***/
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_inc16
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * void silc_atomic_inc16(SilcAtomic16 *atomic);
+ *
+ * DESCRIPTION
+ *
+ * Atomically increments 16-bit integer by one.
+ *
+ ***/
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_inc8
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * void silc_atomic_inc8(SilcAtomic8 *atomic);
+ *
+ * DESCRIPTION
+ *
+ * Atomically increments 8-bit integer by one.
+ *
+ ***/
+
+/* Expands to the common function header shared by every
+   silc_atomic_inc<bits> implementation selected below. */
+#define SILC_ATOMIC_INC_F(bits) \
+static inline void silc_atomic_inc##bits(SilcAtomic##bits *atomic)
+
+#if !defined(SILC_THREADS)
+/* Single-threaded build: a plain increment is sufficient. */
+#define SILC_ATOMIC_INC(bits, bp) \
+SILC_ATOMIC_INC_F(bits) \
+{ \
+  /* No atomic operations */ \
+  ++atomic->value; \
+}
+
+#elif defined(SILC_WIN32)
+/* The cast must produce a pointer: the previous (LONG)&atomic->value
+   converted the address to an integer and could not be passed to
+   InterlockedIncrement.  NOTE(review): for the 8- and 16-bit
+   instantiations a LONG * still covers too many bytes -- verify. */
+#define SILC_ATOMIC_INC(bits, bp) \
+SILC_ATOMIC_INC_F(bits) \
+{ \
+  /* Windows */ \
+  InterlockedIncrement((LONG *)&atomic->value); \
+}
+
+#elif defined(__GNUC__) && (defined(SILC_I486) || defined(SILC_X86_64))
+#define SILC_ATOMIC_INC(bits, bp) \
+SILC_ATOMIC_INC_F(bits) \
+{ \
+  /* GCC + i486 or x86_64 */ \
+  __asm __volatile(SILC_SMP_LOCK "inc" bp " %0" \
+		   : "+m" (atomic->value)); \
+}
+
+#elif defined(__GNUC__) && defined(SILC_IA64)
+#define SILC_ATOMIC_INC(bits, bp) \
+SILC_ATOMIC_INC_F(bits) \
+{ \
+  /* GCC + IA64 (GCC builtin atomic operations) */ \
+  __sync_fetch_and_add(&atomic->value, 1); \
+}
+
+#elif defined(__GNUC__) && defined(SILC_POWERPC)
+#define SILC_ATOMIC_INC(bits, bp) \
+SILC_ATOMIC_INC_F(bits) \
+{ \
+  SilcUInt32 ret; \
+  SilcInt32 val = 1; \
+  /* GCC + PowerPC (code adapted from IBM's documentation) */ \
+  __asm __volatile("0: lwarx %0, 0, %2\n" \
+		   " add %0, %1, %0\n" \
+		   " stwcx. %0, 0, %2\n" \
+		   " bne- 0b" \
+		   : "=&r" (ret) \
+		   : "r" (val), "r" (&atomic->value) \
+		   : "cc"); \
+}
+
+#else /* SILC_ATOMIC_MUTEX */
+#define SILC_ATOMIC_INC(bits, bp) \
+SILC_ATOMIC_INC_F(bits) \
+{ \
+  /* Mutex */ \
+  silc_mutex_lock(atomic->lock); \
+  ++atomic->value; \
+  silc_mutex_unlock(atomic->lock); \
+}
+#endif /* !SILC_THREADS */
+
+/* Emit the 8-, 16- and 32-bit incrementers; the second argument is the
+   x86 operand-size suffix used by the inline-assembler variants. */
+SILC_ATOMIC_INC(8, "b")
+SILC_ATOMIC_INC(16, "w")
+SILC_ATOMIC_INC(32, "l")
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_dec32
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * void silc_atomic_dec32(SilcAtomic32 *atomic);
+ *
+ * DESCRIPTION
+ *
+ * Atomically decrements 32-bit integer by one.
+ *
+ ***/
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_dec16
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * void silc_atomic_dec16(SilcAtomic16 *atomic);
+ *
+ * DESCRIPTION
+ *
+ * Atomically decrements 16-bit integer by one.
+ *
+ ***/
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_dec8
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * void silc_atomic_dec8(SilcAtomic8 *atomic);
+ *
+ * DESCRIPTION
+ *
+ * Atomically decrements 8-bit integer by one.
+ *
+ ***/
+
+/* Expands to the common function header shared by every
+   silc_atomic_dec<bits> implementation selected below. */
+#define SILC_ATOMIC_DEC_F(bits) \
+static inline void silc_atomic_dec##bits(SilcAtomic##bits *atomic)
+
+#if !defined(SILC_THREADS)
+/* Single-threaded build: a plain decrement is sufficient. */
+#define SILC_ATOMIC_DEC(bits, bp) \
+SILC_ATOMIC_DEC_F(bits) \
+{ \
+  /* No atomic operations */ \
+  --atomic->value; \
+}
+
+#elif defined(SILC_WIN32)
+/* The cast must produce a pointer: the previous (LONG)&atomic->value
+   converted the address to an integer and could not be passed to
+   InterlockedDecrement.  NOTE(review): for the 8- and 16-bit
+   instantiations a LONG * still covers too many bytes -- verify. */
+#define SILC_ATOMIC_DEC(bits, bp) \
+SILC_ATOMIC_DEC_F(bits) \
+{ \
+  /* Windows */ \
+  InterlockedDecrement((LONG *)&atomic->value); \
+}
+
+#elif defined(__GNUC__) && (defined(SILC_I486) || defined(SILC_X86_64))
+#define SILC_ATOMIC_DEC(bits, bp) \
+SILC_ATOMIC_DEC_F(bits) \
+{ \
+  /* GCC + i486 or x86_64 */ \
+  __asm __volatile(SILC_SMP_LOCK "dec" bp " %0" \
+		   : "+m" (atomic->value)); \
+}
+
+#elif defined(__GNUC__) && defined(SILC_IA64)
+#define SILC_ATOMIC_DEC(bits, bp) \
+SILC_ATOMIC_DEC_F(bits) \
+{ \
+  /* GCC + IA64 (GCC builtin atomic operations) */ \
+  __sync_fetch_and_sub(&atomic->value, 1); \
+}
+
+#elif defined(__GNUC__) && defined(SILC_POWERPC)
+#define SILC_ATOMIC_DEC(bits, bp) \
+SILC_ATOMIC_DEC_F(bits) \
+{ \
+  SilcUInt32 ret; \
+  SilcInt32 val = -1; \
+  /* GCC + PowerPC (code adapted from IBM's documentation) */ \
+  __asm __volatile("0: lwarx %0, 0, %2\n" \
+		   " add %0, %1, %0\n" \
+		   " stwcx. %0, 0, %2\n" \
+		   " bne- 0b" \
+		   : "=&r" (ret) \
+		   : "r" (val), "r" (&atomic->value) \
+		   : "cc"); \
+}
+
+#else /* SILC_ATOMIC_MUTEX */
+#define SILC_ATOMIC_DEC(bits, bp) \
+SILC_ATOMIC_DEC_F(bits) \
+{ \
+  /* Mutex */ \
+  silc_mutex_lock(atomic->lock); \
+  --atomic->value; \
+  silc_mutex_unlock(atomic->lock); \
+}
+#endif /* !SILC_THREADS */
+
+/* Emit the 8-, 16- and 32-bit decrementers; the second argument is the
+   x86 operand-size suffix used by the inline-assembler variants. */
+SILC_ATOMIC_DEC(8, "b")
+SILC_ATOMIC_DEC(16, "w")
+SILC_ATOMIC_DEC(32, "l")
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_cas32
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * SilcBool silc_atomic_cas32(SilcAtomic32 *atomic, SilcUInt32 old_val,
+ * SilcUInt32 new_val)
+ *
+ * DESCRIPTION
+ *
+ * Performs compare and swap (CAS). Atomically compares if the variable
+ * `atomic' has the value `old_val' and in that case swaps it with the
+ * value `new_val'. Returns TRUE if the old value was same and it was
+ * swapped and FALSE if it differed and was not swapped.
+ *
+ ***/
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_cas16
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * SilcBool silc_atomic_cas16(SilcAtomic16 *atomic, SilcUInt16 old_val,
+ * SilcUInt16 new_val)
+ *
+ * DESCRIPTION
+ *
+ * Performs compare and swap (CAS). Atomically compares if the variable
+ * `atomic' has the value `old_val' and in that case swaps it with the
+ * value `new_val'. Returns TRUE if the old value was same and it was
+ * swapped and FALSE if it differed and was not swapped.
+ *
+ ***/
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_cas8
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * SilcBool silc_atomic_cas8(SilcAtomic8 *atomic, SilcUInt8 old_val,
+ * SilcUInt8 new_val)
+ *
+ * DESCRIPTION
+ *
+ * Performs compare and swap (CAS). Atomically compares if the variable
+ * `atomic' has the value `old_val' and in that case swaps it with the
+ * value `new_val'. Returns TRUE if the old value was same and it was
+ * swapped and FALSE if it differed and was not swapped.
+ *
+ ***/
+
+/* Expands to the common function header shared by every
+   silc_atomic_cas<bits> implementation selected below.
+   NOTE(review): parameters are SilcInt##bits (signed) while the API docs
+   above show SilcUInt -- confirm which signedness is intended. */
+#define SILC_ATOMIC_CAS_F(bits) \
+static inline SilcBool silc_atomic_cas##bits(SilcAtomic##bits *atomic, \
+ SilcInt##bits old_val, \
+ SilcInt##bits new_val)
+
+#if !defined(SILC_THREADS)
+/* Single-threaded build: plain compare-and-set, no locking needed. */
+#define SILC_ATOMIC_CAS(bits, bp) \
+SILC_ATOMIC_CAS_F(bits) \
+{ \
+  /* No atomic operations */ \
+  if (atomic->value == (SilcUInt##bits)old_val) { \
+    atomic->value = new_val; \
+    return TRUE; \
+  } \
+  return FALSE; \
+}
+
+#elif defined(SILC_WIN32)
+/* NOTE(review): InterlockedCompareExchange takes a LONG *; for the 8- and
+   16-bit instantiations &atomic->value is a narrower pointer, so this
+   likely fails to compile (or touches adjacent bytes) on Win32.  The
+   width-specific Interlocked variants may be needed -- verify. */
+#define SILC_ATOMIC_CAS(bits, bp) \
+SILC_ATOMIC_CAS_F(bits) \
+{ \
+  /* Windows */ \
+  LONG o = old_val, n = new_val; \
+  return InterlockedCompareExchange(&atomic->value, n, o) == o; \
+}
+
+#elif defined(__GNUC__) && (defined(SILC_I486) || defined(SILC_X86_64))
+/* CMPXCHG compares EAX/AX/AL ("0"/"=a") with the memory operand and,
+   on match, stores the new value; the old accumulator value comes back
+   in ret for the success test. */
+#define SILC_ATOMIC_CAS(bits, bp) \
+SILC_ATOMIC_CAS_F(bits) \
+{ \
+  /* GCC + i486 or x86_64 */ \
+  SilcUInt##bits ret; \
+  __asm __volatile(SILC_SMP_LOCK "cmpxchg" bp " %2, %1" \
+		   : "=a" (ret), "=m" (atomic->value) \
+		   : "r" (new_val), "m" (atomic->value), "0" (old_val)); \
+  return ret == (SilcUInt##bits)old_val; \
+}
+
+#elif defined(__GNUC__) && defined(SILC_IA64)
+#define SILC_ATOMIC_CAS(bits, bp) \
+SILC_ATOMIC_CAS_F(bits) \
+{ \
+  /* GCC + IA64 (GCC builtin atomic operations) */ \
+  SilcUInt32 o = old_val, n = new_val; \
+  return __sync_bool_compare_and_swap(&atomic->value, o, n); \
+}
+
+#elif defined(__GNUC__) && defined(SILC_POWERPC)
+/* NOTE(review): not implemented -- the generated function returns no
+   value from a SilcBool function, which is undefined behavior if this
+   path is ever compiled and used. */
+#define SILC_ATOMIC_CAS(bits, bp) \
+SILC_ATOMIC_CAS_F(bits) \
+{ \
+  /* GCC + PowerPC */ \
+  /* XXX TODO */ \
+}
+
+#else /* SILC_ATOMIC_MUTEX */
+#define SILC_ATOMIC_CAS(bits, bp) \
+SILC_ATOMIC_CAS_F(bits) \
+{ \
+  /* Mutex */ \
+  silc_mutex_lock(atomic->lock); \
+  if (atomic->value == (SilcUInt##bits)old_val) { \
+    atomic->value = new_val; \
+    silc_mutex_unlock(atomic->lock); \
+    return TRUE; \
+  } \
+  silc_mutex_unlock(atomic->lock); \
+  return FALSE; \
+}
+#endif /* !SILC_THREADS */
+
+/* Emit the 8-, 16- and 32-bit CAS functions; the second argument is the
+   x86 operand-size suffix used by the inline-assembler variants. */
+SILC_ATOMIC_CAS(8, "b")
+SILC_ATOMIC_CAS(16, "w")
+SILC_ATOMIC_CAS(32, "l")
+
+/****f* silcutil/SilcAtomicAPI/silc_atomic_cas_pointer
+ *
+ * SYNOPSIS
+ *
+ * static inline
+ * SilcBool silc_atomic_cas_pointer(SilcAtomicPointer *atomic,
+ * void *old_ptr, void *new_ptr);
+ *
+ * DESCRIPTION
+ *
+ * Performs compare and swap (CAS). Atomically compares if the variable
+ * `atomic' has the pointer `old_ptr' and in that case swaps it with the
+ * pointer `new_ptr'. Returns TRUE if the old pointer was same and it was
+ * swapped and FALSE if it differed and was not swapped.
+ *
+ ***/
+
+static inline
+SilcBool silc_atomic_cas_pointer(SilcAtomicPointer *atomic, void *old_val,
+				 void *new_val)
+{
+#if !defined(SILC_THREADS)
+  /* No atomic operations */
+  if (atomic->value == old_val) {
+    atomic->value = new_val;
+    return TRUE;
+  }
+  return FALSE;
+
+#elif defined(SILC_WIN32)
+  /* Windows */
+  return InterlockedCompareExchangePointer(&atomic->value, new_val, old_val)
+    == old_val;
+
+#elif defined(__GNUC__) && defined(SILC_I486)
+  /* GCC + i486 */
+  void *ret;
+  __asm __volatile(SILC_SMP_LOCK "cmpxchgl %2, %1"
+		   : "=a" (ret), "=m" (atomic->value)
+		   : "c" (new_val), "m" (atomic->value), "0" (old_val));
+  return ret == old_val;
+
+#elif defined(__GNUC__) && defined(SILC_X86_64)
+  /* GCC + x86_64 */
+  void *ret;
+  __asm __volatile(SILC_SMP_LOCK "cmpxchgq %q2, %1"
+		   : "=a" (ret), "=m" (atomic->value)
+		   : "c" (new_val), "m" (atomic->value), "0" (old_val));
+  return ret == old_val;
+
+#elif defined(__GNUC__) && defined(SILC_IA64)
+  /* GCC + IA64 (GCC builtin atomic operations).  The builtin must receive
+     the address of the object itself; the previous (long) casts converted
+     the pointer to an integer and broke the call. */
+  return __sync_bool_compare_and_swap(&atomic->value, old_val, new_val);
+
+#elif defined(__GNUC__) && defined(SILC_POWERPC)
+  /* GCC + PowerPC */
+  /* XXX TODO */
+  /* NOTE(review): not implemented -- falling off the end of a SilcBool
+     function is undefined behavior if this path is ever compiled. */
+
+#else
+  /* Mutex */
+  silc_mutex_lock(atomic->lock);
+  if (atomic->value == old_val) {
+    atomic->value = new_val;
+    silc_mutex_unlock(atomic->lock);
+    return TRUE;
+  }
+  silc_mutex_unlock(atomic->lock);
+  return FALSE;
+#endif
+}
+