Commit 94671527 authored by David Reid

Integrate c89atomic to replace the old atomics API.

This is an internal change and does not affect any public APIs. It is in
preparation for future lock-free high-level APIs.
parent 5cd8c3be
@@ -7602,58 +7602,943 @@ ma_atomic_increment/decrement_*() takes a pointer to the variable being incremen
**************************************************************************************************************************************************************/
#if defined(__clang__)
#if defined(__has_builtin)
#if __has_builtin(__sync_swap)
#define MA_HAS_SYNC_SWAP
#endif
#endif
#elif defined(__GNUC__)
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
#define MA_HAS_GNUC_ATOMICS
#endif
#endif
#if defined(_WIN32) && !defined(__GNUC__) && !defined(__clang__)
#define ma_memory_barrier() MemoryBarrier()
#define ma_atomic_exchange_32(a, b) InterlockedExchange((LONG*)a, (LONG)b)
#define ma_atomic_exchange_64(a, b) InterlockedExchange64((LONGLONG*)a, (LONGLONG)b)
#define ma_atomic_increment_32(a) InterlockedIncrement((LONG*)a)
#define ma_atomic_decrement_32(a) InterlockedDecrement((LONG*)a)
#define ma_compare_and_swap_16(d, e, c) _InterlockedCompareExchange16((short*)d, (short)e, (short)c)
#define ma_compare_and_swap_32(d, e, c) _InterlockedCompareExchange((LONG*)d, (LONG)e, (LONG)c)
#define ma_compare_and_swap_64(d, e, c) _InterlockedCompareExchange64((LONGLONG*)d, (LONGLONG)e, (LONGLONG)c)
#define ma_compare_and_swap_ptr(d, e, c) _InterlockedCompareExchangePointer((void*volatile*)d, (void*)e, (void*)c)
#else
#define ma_memory_barrier() __sync_synchronize()
#if defined(MA_HAS_SYNC_SWAP)
#define ma_atomic_exchange_32(a, b) __sync_swap(a, b)
#define ma_atomic_exchange_64(a, b) __sync_swap(a, b)
#elif defined(MA_HAS_GNUC_ATOMICS)
#define ma_atomic_exchange_32(a, b) (void)__atomic_exchange_n(a, b, __ATOMIC_ACQ_REL)
#define ma_atomic_exchange_64(a, b) (void)__atomic_exchange_n(a, b, __ATOMIC_ACQ_REL)
#else
#define ma_atomic_exchange_32(a, b) __sync_synchronize(); (void)__sync_lock_test_and_set(a, b)
#define ma_atomic_exchange_64(a, b) __sync_synchronize(); (void)__sync_lock_test_and_set(a, b)
#endif
#define ma_atomic_increment_32(a) __sync_add_and_fetch(a, 1)
#define ma_atomic_decrement_32(a) __sync_sub_and_fetch(a, 1)
#define ma_compare_and_swap_16(d, e, c) __sync_val_compare_and_swap(d, c, e)
#define ma_compare_and_swap_32(d, e, c) __sync_val_compare_and_swap(d, c, e)
#define ma_compare_and_swap_64(d, e, c) __sync_val_compare_and_swap(d, c, e)
#define ma_compare_and_swap_ptr(d, e, c) __sync_val_compare_and_swap(d, c, e)
#endif
#ifdef MA_64BIT
#define ma_atomic_exchange_ptr ma_atomic_exchange_64
#endif
#ifdef MA_32BIT
#define ma_atomic_exchange_ptr ma_atomic_exchange_32
#endif
/* c89atomic.h begin */
#ifndef c89atomic_h
#define c89atomic_h
#if defined(__cplusplus)
extern "C" {
#endif
typedef signed char c89atomic_int8;
typedef unsigned char c89atomic_uint8;
typedef signed short c89atomic_int16;
typedef unsigned short c89atomic_uint16;
typedef signed int c89atomic_int32;
typedef unsigned int c89atomic_uint32;
#if defined(_MSC_VER)
typedef signed __int64 c89atomic_int64;
typedef unsigned __int64 c89atomic_uint64;
#else
typedef signed long long c89atomic_int64;
typedef unsigned long long c89atomic_uint64;
#endif
typedef int c89atomic_memory_order;
typedef unsigned char c89atomic_bool;
typedef unsigned char c89atomic_flag;
/* Architecture Detection */
#if !defined(C89ATOMIC_64BIT) && !defined(C89ATOMIC_32BIT)
#ifdef _WIN32
#ifdef _WIN64
#define C89ATOMIC_64BIT
#else
#define C89ATOMIC_32BIT
#endif
#endif
#endif
#if !defined(C89ATOMIC_64BIT) && !defined(C89ATOMIC_32BIT)
#ifdef __GNUC__
#ifdef __LP64__
#define C89ATOMIC_64BIT
#else
#define C89ATOMIC_32BIT
#endif
#endif
#endif
#if !defined(C89ATOMIC_64BIT) && !defined(C89ATOMIC_32BIT)
#include <stdint.h>
#if INTPTR_MAX == INT64_MAX
#define C89ATOMIC_64BIT
#else
#define C89ATOMIC_32BIT
#endif
#endif
#if defined(__x86_64__) || defined(_M_X64)
#define C89ATOMIC_X64
#elif defined(__i386) || defined(_M_IX86)
#define C89ATOMIC_X86
#elif defined(__arm__) || defined(_M_ARM)
#define C89ATOMIC_ARM
#endif
#ifdef _MSC_VER
#define C89ATOMIC_INLINE __forceinline
#elif defined(__GNUC__)
/*
I've had a bug report where GCC is emitting warnings about functions possibly not being inlineable. This warning happens when
the __attribute__((always_inline)) attribute is applied without an accompanying "inline" keyword, which suggests there are
cases where "inline" is not defined. When using -std=c89 or -ansi on the command line we cannot use the "inline" keyword and
instead need to use "__inline__". To work around this issue, "__inline__" is used only when compiling in strict ANSI mode.
*/
#if defined(__STRICT_ANSI__)
#define C89ATOMIC_INLINE __inline__ __attribute__((always_inline))
#else
#define C89ATOMIC_INLINE inline __attribute__((always_inline))
#endif
#else
#define C89ATOMIC_INLINE
#endif
#if defined(_MSC_VER) /*&& !defined(__clang__)*/
/* Visual C++. */
#define c89atomic_memory_order_relaxed 0
#define c89atomic_memory_order_consume 1
#define c89atomic_memory_order_acquire 2
#define c89atomic_memory_order_release 3
#define c89atomic_memory_order_acq_rel 4
#define c89atomic_memory_order_seq_cst 5
/* Visual Studio 2003 and earlier have no support for sized atomic operations. We'll need to use inlined assembly for these compilers. */
#if _MSC_VER >= 1400 /* 1400 = Visual Studio 2005 */
/* New Visual C++. */
#include <intrin.h>
#define c89atomic_exchange_explicit_8( dst, src, order) (c89atomic_uint8 )_InterlockedExchange8 ((volatile char* )dst, (char )src)
#define c89atomic_exchange_explicit_16(dst, src, order) (c89atomic_uint16)_InterlockedExchange16((volatile short*)dst, (short)src)
#define c89atomic_exchange_explicit_32(dst, src, order) (c89atomic_uint32)_InterlockedExchange ((volatile long* )dst, (long )src)
#if defined(C89ATOMIC_64BIT)
#define c89atomic_exchange_explicit_64(dst, src, order) (c89atomic_uint64)_InterlockedExchange64((volatile long long*)dst, (long long)src)
#endif
#define c89atomic_fetch_add_explicit_8( dst, src, order) (c89atomic_uint8 )_InterlockedExchangeAdd8 ((volatile char* )dst, (char )src)
#define c89atomic_fetch_add_explicit_16(dst, src, order) (c89atomic_uint16)_InterlockedExchangeAdd16((volatile short*)dst, (short)src)
#define c89atomic_fetch_add_explicit_32(dst, src, order) (c89atomic_uint32)_InterlockedExchangeAdd ((volatile long* )dst, (long )src)
#if defined(C89ATOMIC_64BIT)
#define c89atomic_fetch_add_explicit_64(dst, src, order) (c89atomic_uint64)_InterlockedExchangeAdd64((volatile long long*)dst, (long long)src)
#endif
#define c89atomic_compare_and_swap_8( dst, expected, desired) (c89atomic_uint8 )_InterlockedCompareExchange8 ((volatile char* )dst, (char )desired, (char )expected)
#define c89atomic_compare_and_swap_16(dst, expected, desired) (c89atomic_uint16)_InterlockedCompareExchange16((volatile short* )dst, (short )desired, (short )expected)
#define c89atomic_compare_and_swap_32(dst, expected, desired) (c89atomic_uint32)_InterlockedCompareExchange ((volatile long* )dst, (long )desired, (long )expected)
#define c89atomic_compare_and_swap_64(dst, expected, desired) (c89atomic_uint64)_InterlockedCompareExchange64((volatile long long*)dst, (long long)desired, (long long)expected)
/* Can't use MemoryBarrier() for this as it requires the Windows headers, which we want to avoid including here. */
#if defined(C89ATOMIC_X64)
#define c89atomic_thread_fence(order) __faststorefence()
#else
static C89ATOMIC_INLINE void c89atomic_thread_fence(c89atomic_memory_order order)
{
volatile c89atomic_uint32 barrier = 0;
c89atomic_fetch_add_explicit_32(&barrier, 0, order);
}
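/*
A dummy interlocked add is used as the fence here: on x86 any LOCK-prefixed instruction acts as a
full memory barrier, and the interlocked intrinsics provide full barriers on other architectures
as well, so atomically adding 0 to a private variable serializes memory without MemoryBarrier().
*/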
#endif /* C89ATOMIC_X64 */
#else
/* Old Visual C++. */
#if defined(__i386) || defined(_M_IX86)
/* x86. Implemented via inlined assembly. */
/* thread_fence() */
static C89ATOMIC_INLINE void __stdcall c89atomic_thread_fence(int order)
{
volatile c89atomic_uint32 barrier;
__asm {
xchg barrier, eax
}
}
/* exchange() */
static C89ATOMIC_INLINE c89atomic_uint8 __stdcall c89atomic_exchange_explicit_8(volatile c89atomic_uint8* dst, c89atomic_uint8 src, int order)
{
(void)order;
__asm {
mov ecx, dst
mov al, src
lock xchg [ecx], al
}
}
static C89ATOMIC_INLINE c89atomic_uint16 __stdcall c89atomic_exchange_explicit_16(volatile c89atomic_uint16* dst, c89atomic_uint16 src, int order)
{
(void)order;
__asm {
mov ecx, dst
mov ax, src
lock xchg [ecx], ax
}
}
static C89ATOMIC_INLINE c89atomic_uint32 __stdcall c89atomic_exchange_explicit_32(volatile c89atomic_uint32* dst, c89atomic_uint32 src, int order)
{
(void)order;
__asm {
mov ecx, dst
mov eax, src
lock xchg [ecx], eax
}
}
/* fetch_add() */
static C89ATOMIC_INLINE c89atomic_uint8 __stdcall c89atomic_fetch_add_explicit_8(volatile c89atomic_uint8* dst, c89atomic_uint8 src, int order)
{
(void)order;
__asm {
mov ecx, dst
mov al, src
lock xadd [ecx], al
}
}
static C89ATOMIC_INLINE c89atomic_uint16 __stdcall c89atomic_fetch_add_explicit_16(volatile c89atomic_uint16* dst, c89atomic_uint16 src, int order)
{
(void)order;
__asm {
mov ecx, dst
mov ax, src
lock xadd [ecx], ax
}
}
static C89ATOMIC_INLINE c89atomic_uint32 __stdcall c89atomic_fetch_add_explicit_32(volatile c89atomic_uint32* dst, c89atomic_uint32 src, int order)
{
(void)order;
__asm {
mov ecx, dst
mov eax, src
lock xadd [ecx], eax
}
}
/* compare_and_swap() */
static C89ATOMIC_INLINE c89atomic_uint8 __stdcall c89atomic_compare_and_swap_8(volatile c89atomic_uint8* dst, c89atomic_uint8 expected, c89atomic_uint8 desired)
{
__asm {
mov ecx, dst
mov al, expected
mov dl, desired
lock cmpxchg [ecx], dl /* Writes to EAX which MSVC will treat as the return value. */
}
}
static C89ATOMIC_INLINE c89atomic_uint16 __stdcall c89atomic_compare_and_swap_16(volatile c89atomic_uint16* dst, c89atomic_uint16 expected, c89atomic_uint16 desired)
{
__asm {
mov ecx, dst
mov ax, expected
mov dx, desired
lock cmpxchg [ecx], dx /* Writes to EAX which MSVC will treat as the return value. */
}
}
static C89ATOMIC_INLINE c89atomic_uint32 __stdcall c89atomic_compare_and_swap_32(volatile c89atomic_uint32* dst, c89atomic_uint32 expected, c89atomic_uint32 desired)
{
__asm {
mov ecx, dst
mov eax, expected
mov edx, desired
lock cmpxchg [ecx], edx /* Writes to EAX which MSVC will treat as the return value. */
}
}
static C89ATOMIC_INLINE c89atomic_uint64 __stdcall c89atomic_compare_and_swap_64(volatile c89atomic_uint64* dst, c89atomic_uint64 expected, c89atomic_uint64 desired)
{
__asm {
mov esi, dst /* From Microsoft documentation: "... you don't need to preserve the EAX, EBX, ECX, EDX, ESI, or EDI registers." Choosing ESI since it's the next available one in their list. */
mov eax, dword ptr expected
mov edx, dword ptr expected + 4
mov ebx, dword ptr desired
mov ecx, dword ptr desired + 4
lock cmpxchg8b qword ptr [esi] /* Writes to EAX:EDX which MSVC will treat as the return value. */
}
}
#else
/* x64 or ARM. Should never get here because these are not valid targets for older versions of Visual Studio. */
error "Unsupported architecture."
#endif
#endif
/*
I'm not sure how to implement a compiler barrier for old MSVC so I'm just making it a thread_fence() and hopefully the compiler will see the volatile and not do
any reshuffling. If anybody has a better idea on this please let me know! Cannot use _ReadWriteBarrier() as it has been marked as deprecated.
*/
#define c89atomic_compiler_fence() c89atomic_thread_fence(c89atomic_memory_order_seq_cst)
/* I'm not sure how to implement this for MSVC. For now just using thread_fence(). */
#define c89atomic_signal_fence(order) c89atomic_thread_fence(order)
/* Atomic loads can be implemented in terms of a compare-and-swap. */
#define c89atomic_load_explicit_8( ptr, order) c89atomic_compare_and_swap_8 (ptr, 0, 0)
#define c89atomic_load_explicit_16(ptr, order) c89atomic_compare_and_swap_16(ptr, 0, 0)
#define c89atomic_load_explicit_32(ptr, order) c89atomic_compare_and_swap_32(ptr, 0, 0)
#define c89atomic_load_explicit_64(ptr, order) c89atomic_compare_and_swap_64(ptr, 0, 0)
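/*
This works because a compare-and-swap always returns the value that was in memory, whether or not
the swap actually happened: if the value is 0 it is replaced with an identical 0, and if it is
non-zero the swap fails, but in both cases the current value is returned unchanged.
*/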
/* atomic_store() is the same as atomic_exchange() but returns void. */
#define c89atomic_store_explicit_8( dst, src, order) (void)c89atomic_exchange_explicit_8 (dst, src, order)
#define c89atomic_store_explicit_16(dst, src, order) (void)c89atomic_exchange_explicit_16(dst, src, order)
#define c89atomic_store_explicit_32(dst, src, order) (void)c89atomic_exchange_explicit_32(dst, src, order)
#define c89atomic_store_explicit_64(dst, src, order) (void)c89atomic_exchange_explicit_64(dst, src, order)
/* Some 64-bit atomic operations are not supported by MSVC when compiling in 32-bit mode.*/
#if defined(C89ATOMIC_32BIT)
static C89ATOMIC_INLINE c89atomic_uint64 __stdcall c89atomic_exchange_explicit_64(volatile c89atomic_uint64* dst, c89atomic_uint64 src, int order)
{
volatile c89atomic_uint64 oldValue;
do {
oldValue = *dst;
} while (c89atomic_compare_and_swap_64(dst, oldValue, src) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint64 __stdcall c89atomic_fetch_add_explicit_64(volatile c89atomic_uint64* dst, c89atomic_uint64 src, int order)
{
volatile c89atomic_uint64 oldValue;
volatile c89atomic_uint64 newValue;
do {
oldValue = *dst;
newValue = oldValue + src;
} while (c89atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
#endif
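/*
The remaining read-modify-write operations below are emulated with the same compare-and-swap loop:
snapshot the current value, compute the new value, then retry until the CAS confirms that no other
thread modified the variable in between.
*/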
/* fetch_sub() */
static C89ATOMIC_INLINE c89atomic_uint8 __stdcall c89atomic_fetch_sub_explicit_8(volatile c89atomic_uint8* dst, c89atomic_uint8 src, int order)
{
volatile c89atomic_uint8 oldValue;
volatile c89atomic_uint8 newValue;
do {
oldValue = *dst;
newValue = oldValue - src;
} while (c89atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint16 __stdcall c89atomic_fetch_sub_explicit_16(volatile c89atomic_uint16* dst, c89atomic_uint16 src, int order)
{
volatile c89atomic_uint16 oldValue;
volatile c89atomic_uint16 newValue;
do {
oldValue = *dst;
newValue = oldValue - src;
} while (c89atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint32 __stdcall c89atomic_fetch_sub_explicit_32(volatile c89atomic_uint32* dst, c89atomic_uint32 src, int order)
{
volatile c89atomic_uint32 oldValue;
volatile c89atomic_uint32 newValue;
do {
oldValue = *dst;
newValue = oldValue - src;
} while (c89atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint64 __stdcall c89atomic_fetch_sub_explicit_64(volatile c89atomic_uint64* dst, c89atomic_uint64 src, int order)
{
volatile c89atomic_uint64 oldValue;
volatile c89atomic_uint64 newValue;
do {
oldValue = *dst;
newValue = oldValue - src;
} while (c89atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
/* fetch_and() */
static C89ATOMIC_INLINE c89atomic_uint8 __stdcall c89atomic_fetch_and_explicit_8(volatile c89atomic_uint8* dst, c89atomic_uint8 src, int order)
{
volatile c89atomic_uint8 oldValue;
volatile c89atomic_uint8 newValue;
do {
oldValue = *dst;
newValue = oldValue & src;
} while (c89atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint16 __stdcall c89atomic_fetch_and_explicit_16(volatile c89atomic_uint16* dst, c89atomic_uint16 src, int order)
{
volatile c89atomic_uint16 oldValue;
volatile c89atomic_uint16 newValue;
do {
oldValue = *dst;
newValue = oldValue & src;
} while (c89atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint32 __stdcall c89atomic_fetch_and_explicit_32(volatile c89atomic_uint32* dst, c89atomic_uint32 src, int order)
{
volatile c89atomic_uint32 oldValue;
volatile c89atomic_uint32 newValue;
do {
oldValue = *dst;
newValue = oldValue & src;
} while (c89atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint64 __stdcall c89atomic_fetch_and_explicit_64(volatile c89atomic_uint64* dst, c89atomic_uint64 src, int order)
{
volatile c89atomic_uint64 oldValue;
volatile c89atomic_uint64 newValue;
do {
oldValue = *dst;
newValue = oldValue & src;
} while (c89atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
/* fetch_xor() */
static C89ATOMIC_INLINE c89atomic_uint8 __stdcall c89atomic_fetch_xor_explicit_8(volatile c89atomic_uint8* dst, c89atomic_uint8 src, int order)
{
volatile c89atomic_uint8 oldValue;
volatile c89atomic_uint8 newValue;
do {
oldValue = *dst;
newValue = oldValue ^ src;
} while (c89atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint16 __stdcall c89atomic_fetch_xor_explicit_16(volatile c89atomic_uint16* dst, c89atomic_uint16 src, int order)
{
volatile c89atomic_uint16 oldValue;
volatile c89atomic_uint16 newValue;
do {
oldValue = *dst;
newValue = oldValue ^ src;
} while (c89atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint32 __stdcall c89atomic_fetch_xor_explicit_32(volatile c89atomic_uint32* dst, c89atomic_uint32 src, int order)
{
volatile c89atomic_uint32 oldValue;
volatile c89atomic_uint32 newValue;
do {
oldValue = *dst;
newValue = oldValue ^ src;
} while (c89atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint64 __stdcall c89atomic_fetch_xor_explicit_64(volatile c89atomic_uint64* dst, c89atomic_uint64 src, int order)
{
volatile c89atomic_uint64 oldValue;
volatile c89atomic_uint64 newValue;
do {
oldValue = *dst;
newValue = oldValue ^ src;
} while (c89atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
/* fetch_or() */
static C89ATOMIC_INLINE c89atomic_uint8 __stdcall c89atomic_fetch_or_explicit_8(volatile c89atomic_uint8* dst, c89atomic_uint8 src, int order)
{
volatile c89atomic_uint8 oldValue;
volatile c89atomic_uint8 newValue;
do {
oldValue = *dst;
newValue = oldValue | src;
} while (c89atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint16 __stdcall c89atomic_fetch_or_explicit_16(volatile c89atomic_uint16* dst, c89atomic_uint16 src, int order)
{
volatile c89atomic_uint16 oldValue;
volatile c89atomic_uint16 newValue;
do {
oldValue = *dst;
newValue = oldValue | src;
} while (c89atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint32 __stdcall c89atomic_fetch_or_explicit_32(volatile c89atomic_uint32* dst, c89atomic_uint32 src, int order)
{
volatile c89atomic_uint32 oldValue;
volatile c89atomic_uint32 newValue;
do {
oldValue = *dst;
newValue = oldValue | src;
} while (c89atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint64 __stdcall c89atomic_fetch_or_explicit_64(volatile c89atomic_uint64* dst, c89atomic_uint64 src, int order)
{
volatile c89atomic_uint64 oldValue;
volatile c89atomic_uint64 newValue;
do {
oldValue = *dst;
newValue = oldValue | src;
} while (c89atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue);
(void)order;
return oldValue;
}
#define c89atomic_test_and_set_explicit_8( dst, order) c89atomic_exchange_explicit_8 (dst, 1, order)
#define c89atomic_test_and_set_explicit_16(dst, order) c89atomic_exchange_explicit_16(dst, 1, order)
#define c89atomic_test_and_set_explicit_32(dst, order) c89atomic_exchange_explicit_32(dst, 1, order)
#define c89atomic_test_and_set_explicit_64(dst, order) c89atomic_exchange_explicit_64(dst, 1, order)
#define c89atomic_clear_explicit_8( dst, order) c89atomic_store_explicit_8 (dst, 0, order)
#define c89atomic_clear_explicit_16(dst, order) c89atomic_store_explicit_16(dst, 0, order)
#define c89atomic_clear_explicit_32(dst, order) c89atomic_store_explicit_32(dst, 0, order)
#define c89atomic_clear_explicit_64(dst, order) c89atomic_store_explicit_64(dst, 0, order)
#define c89atomic_flag_test_and_set_explicit(ptr, order) (c89atomic_flag)c89atomic_test_and_set_explicit_8(ptr, order)
#define c89atomic_flag_clear_explicit(ptr, order) c89atomic_clear_explicit_8(ptr, order)
#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)))
/* Modern GCC atomic built-ins. */
#define C89ATOMIC_HAS_NATIVE_COMPARE_EXCHANGE
#define C89ATOMIC_HAS_NATIVE_IS_LOCK_FREE
#define c89atomic_memory_order_relaxed __ATOMIC_RELAXED
#define c89atomic_memory_order_consume __ATOMIC_CONSUME
#define c89atomic_memory_order_acquire __ATOMIC_ACQUIRE
#define c89atomic_memory_order_release __ATOMIC_RELEASE
#define c89atomic_memory_order_acq_rel __ATOMIC_ACQ_REL
#define c89atomic_memory_order_seq_cst __ATOMIC_SEQ_CST
#define c89atomic_compiler_fence() __asm__ __volatile__("":::"memory")
#define c89atomic_thread_fence(order) __atomic_thread_fence(order)
#define c89atomic_signal_fence(order) __atomic_signal_fence(order)
#define c89atomic_is_lock_free_8(ptr) __atomic_is_lock_free(1, ptr)
#define c89atomic_is_lock_free_16(ptr) __atomic_is_lock_free(2, ptr)
#define c89atomic_is_lock_free_32(ptr) __atomic_is_lock_free(4, ptr)
#define c89atomic_is_lock_free_64(ptr) __atomic_is_lock_free(8, ptr)
#define c89atomic_flag_test_and_set_explicit(dst, order) (c89atomic_flag)__atomic_test_and_set(dst, order)
#define c89atomic_flag_clear_explicit(dst, order) __atomic_clear(dst, order)
#define c89atomic_test_and_set_explicit_8( dst, order) __atomic_exchange_n(dst, 1, order)
#define c89atomic_test_and_set_explicit_16(dst, order) __atomic_exchange_n(dst, 1, order)
#define c89atomic_test_and_set_explicit_32(dst, order) __atomic_exchange_n(dst, 1, order)
#define c89atomic_test_and_set_explicit_64(dst, order) __atomic_exchange_n(dst, 1, order)
#define c89atomic_clear_explicit_8( dst, order) __atomic_store_n(dst, 0, order)
#define c89atomic_clear_explicit_16(dst, order) __atomic_store_n(dst, 0, order)
#define c89atomic_clear_explicit_32(dst, order) __atomic_store_n(dst, 0, order)
#define c89atomic_clear_explicit_64(dst, order) __atomic_store_n(dst, 0, order)
#define c89atomic_store_explicit_8( dst, src, order) __atomic_store_n(dst, src, order)
#define c89atomic_store_explicit_16(dst, src, order) __atomic_store_n(dst, src, order)
#define c89atomic_store_explicit_32(dst, src, order) __atomic_store_n(dst, src, order)
#define c89atomic_store_explicit_64(dst, src, order) __atomic_store_n(dst, src, order)
#define c89atomic_load_explicit_8( dst, order) __atomic_load_n(dst, order)
#define c89atomic_load_explicit_16(dst, order) __atomic_load_n(dst, order)
#define c89atomic_load_explicit_32(dst, order) __atomic_load_n(dst, order)
#define c89atomic_load_explicit_64(dst, order) __atomic_load_n(dst, order)
#define c89atomic_exchange_explicit_8( dst, src, order) __atomic_exchange_n(dst, src, order)
#define c89atomic_exchange_explicit_16(dst, src, order) __atomic_exchange_n(dst, src, order)
#define c89atomic_exchange_explicit_32(dst, src, order) __atomic_exchange_n(dst, src, order)
#define c89atomic_exchange_explicit_64(dst, src, order) __atomic_exchange_n(dst, src, order)
#define c89atomic_compare_exchange_strong_explicit_8( dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 0, successOrder, failureOrder)
#define c89atomic_compare_exchange_strong_explicit_16(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 0, successOrder, failureOrder)
#define c89atomic_compare_exchange_strong_explicit_32(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 0, successOrder, failureOrder)
#define c89atomic_compare_exchange_strong_explicit_64(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 0, successOrder, failureOrder)
#define c89atomic_compare_exchange_weak_explicit_8( dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 1, successOrder, failureOrder)
#define c89atomic_compare_exchange_weak_explicit_16(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 1, successOrder, failureOrder)
#define c89atomic_compare_exchange_weak_explicit_32(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 1, successOrder, failureOrder)
#define c89atomic_compare_exchange_weak_explicit_64(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 1, successOrder, failureOrder)
#define c89atomic_fetch_add_explicit_8( dst, src, order) __atomic_fetch_add(dst, src, order)
#define c89atomic_fetch_add_explicit_16(dst, src, order) __atomic_fetch_add(dst, src, order)
#define c89atomic_fetch_add_explicit_32(dst, src, order) __atomic_fetch_add(dst, src, order)
#define c89atomic_fetch_add_explicit_64(dst, src, order) __atomic_fetch_add(dst, src, order)
#define c89atomic_fetch_sub_explicit_8( dst, src, order) __atomic_fetch_sub(dst, src, order)
#define c89atomic_fetch_sub_explicit_16(dst, src, order) __atomic_fetch_sub(dst, src, order)
#define c89atomic_fetch_sub_explicit_32(dst, src, order) __atomic_fetch_sub(dst, src, order)
#define c89atomic_fetch_sub_explicit_64(dst, src, order) __atomic_fetch_sub(dst, src, order)
#define c89atomic_fetch_or_explicit_8( dst, src, order) __atomic_fetch_or(dst, src, order)
#define c89atomic_fetch_or_explicit_16(dst, src, order) __atomic_fetch_or(dst, src, order)
#define c89atomic_fetch_or_explicit_32(dst, src, order) __atomic_fetch_or(dst, src, order)
#define c89atomic_fetch_or_explicit_64(dst, src, order) __atomic_fetch_or(dst, src, order)
#define c89atomic_fetch_xor_explicit_8( dst, src, order) __atomic_fetch_xor(dst, src, order)
#define c89atomic_fetch_xor_explicit_16(dst, src, order) __atomic_fetch_xor(dst, src, order)
#define c89atomic_fetch_xor_explicit_32(dst, src, order) __atomic_fetch_xor(dst, src, order)
#define c89atomic_fetch_xor_explicit_64(dst, src, order) __atomic_fetch_xor(dst, src, order)
#define c89atomic_fetch_and_explicit_8( dst, src, order) __atomic_fetch_and(dst, src, order)
#define c89atomic_fetch_and_explicit_16(dst, src, order) __atomic_fetch_and(dst, src, order)
#define c89atomic_fetch_and_explicit_32(dst, src, order) __atomic_fetch_and(dst, src, order)
#define c89atomic_fetch_and_explicit_64(dst, src, order) __atomic_fetch_and(dst, src, order)
#define c89atomic_compare_and_swap_8( dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired)
#define c89atomic_compare_and_swap_16(dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired)
#define c89atomic_compare_and_swap_32(dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired)
#define c89atomic_compare_and_swap_64(dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired)
#else
/* Legacy GCC atomic built-ins. Everything is a full memory barrier. */
#define c89atomic_memory_order_relaxed 1
#define c89atomic_memory_order_consume 2
#define c89atomic_memory_order_acquire 3
#define c89atomic_memory_order_release 4
#define c89atomic_memory_order_acq_rel 5
#define c89atomic_memory_order_seq_cst 6
#define c89atomic_compiler_fence() __asm__ __volatile__("":::"memory")
#define c89atomic_thread_fence(order) __sync_synchronize()
#define c89atomic_signal_fence(order) c89atomic_thread_fence(order)
static C89ATOMIC_INLINE c89atomic_uint8 c89atomic_exchange_explicit_8(volatile c89atomic_uint8* dst, c89atomic_uint8 src, c89atomic_memory_order order)
{
if (order > c89atomic_memory_order_acquire) {
__sync_synchronize();
}
return __sync_lock_test_and_set(dst, src);
}
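/*
__sync_lock_test_and_set() is only guaranteed to be an acquire barrier, which is why orders
stronger than acquire issue a full __sync_synchronize() before the swap.
*/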
static C89ATOMIC_INLINE c89atomic_uint16 c89atomic_exchange_explicit_16(volatile c89atomic_uint16* dst, c89atomic_uint16 src, c89atomic_memory_order order)
{
volatile c89atomic_uint16 oldValue;
do {
oldValue = *dst;
} while (__sync_val_compare_and_swap(dst, oldValue, src) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint32 c89atomic_exchange_explicit_32(volatile c89atomic_uint32* dst, c89atomic_uint32 src, c89atomic_memory_order order)
{
volatile c89atomic_uint32 oldValue;
do {
oldValue = *dst;
} while (__sync_val_compare_and_swap(dst, oldValue, src) != oldValue);
(void)order;
return oldValue;
}
static C89ATOMIC_INLINE c89atomic_uint64 c89atomic_exchange_explicit_64(volatile c89atomic_uint64* dst, c89atomic_uint64 src, c89atomic_memory_order order)
{
volatile c89atomic_uint64 oldValue;
do {
oldValue = *dst;
} while (__sync_val_compare_and_swap(dst, oldValue, src) != oldValue);
(void)order;
return oldValue;
}
#define c89atomic_fetch_add_explicit_8( dst, src, order) __sync_fetch_and_add(dst, src)
#define c89atomic_fetch_add_explicit_16(dst, src, order) __sync_fetch_and_add(dst, src)
#define c89atomic_fetch_add_explicit_32(dst, src, order) __sync_fetch_and_add(dst, src)
#define c89atomic_fetch_add_explicit_64(dst, src, order) __sync_fetch_and_add(dst, src)
#define c89atomic_fetch_sub_explicit_8( dst, src, order) __sync_fetch_and_sub(dst, src)
#define c89atomic_fetch_sub_explicit_16(dst, src, order) __sync_fetch_and_sub(dst, src)
#define c89atomic_fetch_sub_explicit_32(dst, src, order) __sync_fetch_and_sub(dst, src)
#define c89atomic_fetch_sub_explicit_64(dst, src, order) __sync_fetch_and_sub(dst, src)
#define c89atomic_fetch_or_explicit_8( dst, src, order) __sync_fetch_and_or(dst, src)
#define c89atomic_fetch_or_explicit_16(dst, src, order) __sync_fetch_and_or(dst, src)
#define c89atomic_fetch_or_explicit_32(dst, src, order) __sync_fetch_and_or(dst, src)
#define c89atomic_fetch_or_explicit_64(dst, src, order) __sync_fetch_and_or(dst, src)
#define c89atomic_fetch_xor_explicit_8( dst, src, order) __sync_fetch_and_xor(dst, src)
#define c89atomic_fetch_xor_explicit_16(dst, src, order) __sync_fetch_and_xor(dst, src)
#define c89atomic_fetch_xor_explicit_32(dst, src, order) __sync_fetch_and_xor(dst, src)
#define c89atomic_fetch_xor_explicit_64(dst, src, order) __sync_fetch_and_xor(dst, src)
#define c89atomic_fetch_and_explicit_8( dst, src, order) __sync_fetch_and_and(dst, src)
#define c89atomic_fetch_and_explicit_16(dst, src, order) __sync_fetch_and_and(dst, src)
#define c89atomic_fetch_and_explicit_32(dst, src, order) __sync_fetch_and_and(dst, src)
#define c89atomic_fetch_and_explicit_64(dst, src, order) __sync_fetch_and_and(dst, src)
#define c89atomic_compare_and_swap_8( dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired)
#define c89atomic_compare_and_swap_16(dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired)
#define c89atomic_compare_and_swap_32(dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired)
#define c89atomic_compare_and_swap_64(dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired)
#define c89atomic_load_explicit_8( ptr, order) c89atomic_compare_and_swap_8 (ptr, 0, 0)
#define c89atomic_load_explicit_16(ptr, order) c89atomic_compare_and_swap_16(ptr, 0, 0)
#define c89atomic_load_explicit_32(ptr, order) c89atomic_compare_and_swap_32(ptr, 0, 0)
#define c89atomic_load_explicit_64(ptr, order) c89atomic_compare_and_swap_64(ptr, 0, 0)
#define c89atomic_store_explicit_8( dst, src, order) (void)c89atomic_exchange_explicit_8 (dst, src, order)
#define c89atomic_store_explicit_16(dst, src, order) (void)c89atomic_exchange_explicit_16(dst, src, order)
#define c89atomic_store_explicit_32(dst, src, order) (void)c89atomic_exchange_explicit_32(dst, src, order)
#define c89atomic_store_explicit_64(dst, src, order) (void)c89atomic_exchange_explicit_64(dst, src, order)
#define c89atomic_test_and_set_explicit_8( dst, order) c89atomic_exchange_explicit_8 (dst, 1, order)
#define c89atomic_test_and_set_explicit_16(dst, order) c89atomic_exchange_explicit_16(dst, 1, order)
#define c89atomic_test_and_set_explicit_32(dst, order) c89atomic_exchange_explicit_32(dst, 1, order)
#define c89atomic_test_and_set_explicit_64(dst, order) c89atomic_exchange_explicit_64(dst, 1, order)
#define c89atomic_clear_explicit_8( dst, order) c89atomic_store_explicit_8 (dst, 0, order)
#define c89atomic_clear_explicit_16(dst, order) c89atomic_store_explicit_16(dst, 0, order)
#define c89atomic_clear_explicit_32(dst, order) c89atomic_store_explicit_32(dst, 0, order)
#define c89atomic_clear_explicit_64(dst, order) c89atomic_store_explicit_64(dst, 0, order)
#define c89atomic_flag_test_and_set_explicit(ptr, order) (c89atomic_flag)c89atomic_test_and_set_explicit_8(ptr, order)
#define c89atomic_flag_clear_explicit(ptr, order) c89atomic_clear_explicit_8(ptr, order)
#endif
/* compare_exchange() */
#if !defined(C89ATOMIC_HAS_NATIVE_COMPARE_EXCHANGE)
c89atomic_bool c89atomic_compare_exchange_strong_explicit_8(volatile c89atomic_uint8* dst, volatile c89atomic_uint8* expected, c89atomic_uint8 desired, c89atomic_memory_order successOrder, c89atomic_memory_order failureOrder)
{
c89atomic_uint8 expectedValue;
c89atomic_uint8 result;
(void)successOrder;
(void)failureOrder;
expectedValue = c89atomic_load_explicit_8(expected, c89atomic_memory_order_seq_cst);
result = c89atomic_compare_and_swap_8(dst, expectedValue, desired);
if (result == expectedValue) {
return 1;
} else {
c89atomic_store_explicit_8(expected, result, failureOrder);
return 0;
}
}
c89atomic_bool c89atomic_compare_exchange_strong_explicit_16(volatile c89atomic_uint16* dst, volatile c89atomic_uint16* expected, c89atomic_uint16 desired, c89atomic_memory_order successOrder, c89atomic_memory_order failureOrder)
{
c89atomic_uint16 expectedValue;
c89atomic_uint16 result;
(void)successOrder;
(void)failureOrder;
expectedValue = c89atomic_load_explicit_16(expected, c89atomic_memory_order_seq_cst);
result = c89atomic_compare_and_swap_16(dst, expectedValue, desired);
if (result == expectedValue) {
return 1;
} else {
c89atomic_store_explicit_16(expected, result, failureOrder);
return 0;
}
}
c89atomic_bool c89atomic_compare_exchange_strong_explicit_32(volatile c89atomic_uint32* dst, volatile c89atomic_uint32* expected, c89atomic_uint32 desired, c89atomic_memory_order successOrder, c89atomic_memory_order failureOrder)
{
c89atomic_uint32 expectedValue;
c89atomic_uint32 result;
(void)successOrder;
(void)failureOrder;
expectedValue = c89atomic_load_explicit_32(expected, c89atomic_memory_order_seq_cst);
result = c89atomic_compare_and_swap_32(dst, expectedValue, desired);
if (result == expectedValue) {
return 1;
} else {
c89atomic_store_explicit_32(expected, result, failureOrder);
return 0;
}
}
c89atomic_bool c89atomic_compare_exchange_strong_explicit_64(volatile c89atomic_uint64* dst, volatile c89atomic_uint64* expected, c89atomic_uint64 desired, c89atomic_memory_order successOrder, c89atomic_memory_order failureOrder)
{
c89atomic_uint64 expectedValue;
c89atomic_uint64 result;
(void)successOrder;
(void)failureOrder;
expectedValue = c89atomic_load_explicit_64(expected, c89atomic_memory_order_seq_cst);
result = c89atomic_compare_and_swap_64(dst, expectedValue, desired);
if (result == expectedValue) {
return 1;
} else {
c89atomic_store_explicit_64(expected, result, failureOrder);
return 0;
}
}
#define c89atomic_compare_exchange_weak_explicit_8( dst, expected, desired, successOrder, failureOrder) c89atomic_compare_exchange_strong_explicit_8 (dst, expected, desired, successOrder, failureOrder)
#define c89atomic_compare_exchange_weak_explicit_16(dst, expected, desired, successOrder, failureOrder) c89atomic_compare_exchange_strong_explicit_16(dst, expected, desired, successOrder, failureOrder)
#define c89atomic_compare_exchange_weak_explicit_32(dst, expected, desired, successOrder, failureOrder) c89atomic_compare_exchange_strong_explicit_32(dst, expected, desired, successOrder, failureOrder)
#define c89atomic_compare_exchange_weak_explicit_64(dst, expected, desired, successOrder, failureOrder) c89atomic_compare_exchange_strong_explicit_64(dst, expected, desired, successOrder, failureOrder)
#endif /* C89ATOMIC_HAS_NATIVE_COMPARE_EXCHANGE */
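/*
Illustrative sketch (hypothetical helper, not part of c89atomic.h): the compare_exchange functions
above are typically driven from a retry loop, relying on the failure path to refresh "expected"
with the value currently in memory. For example, an atomic-max might look like this, returning the
last value observed at dst:
*/
static C89ATOMIC_INLINE c89atomic_uint32 c89atomic_example_fetch_max_32(volatile c89atomic_uint32* dst, c89atomic_uint32 value)
{
    c89atomic_uint32 expected = c89atomic_load_explicit_32(dst, c89atomic_memory_order_relaxed);
    while (expected < value) {
        /* On failure, "expected" is updated to the current value and the loop re-evaluates. */
        if (c89atomic_compare_exchange_weak_explicit_32(dst, &expected, value, c89atomic_memory_order_seq_cst, c89atomic_memory_order_relaxed)) {
            break;
        }
    }
    return expected;
}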
#if !defined(C89ATOMIC_HAS_NATIVE_IS_LOCK_FREE)
#define c89atomic_is_lock_free_8( ptr) 1
#define c89atomic_is_lock_free_16(ptr) 1
#define c89atomic_is_lock_free_32(ptr) 1
/* For 64-bit atomics, we can only safely say atomics are lock free on 64-bit architectures or x86. Otherwise we need to be conservative and assume not lock free. */
#if defined(C89ATOMIC_64BIT)
#define c89atomic_is_lock_free_64(ptr) 1
#else
#if defined(C89ATOMIC_X86) || defined(C89ATOMIC_X64)
#define c89atomic_is_lock_free_64(ptr) 1
#else
#define c89atomic_is_lock_free_64(ptr) 0
#endif
#endif
#endif /* C89ATOMIC_HAS_NATIVE_IS_LOCK_FREE */
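/*
These can be queried at runtime, e.g. to fall back to a mutex-guarded path on platforms where
64-bit atomics are emulated rather than lock-free.
*/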
/* Pointer versions of relevant operations. */
#if defined(C89ATOMIC_64BIT)
#define c89atomic_is_lock_free_ptr(ptr) c89atomic_is_lock_free_64((volatile c89atomic_uint64*)ptr)
#define c89atomic_load_explicit_ptr(ptr, order) (void*)c89atomic_load_explicit_64((volatile c89atomic_uint64*)ptr, order)
#define c89atomic_store_explicit_ptr(dst, src, order) (void*)c89atomic_store_explicit_64((volatile c89atomic_uint64*)dst, (c89atomic_uint64)src, order)
#define c89atomic_exchange_explicit_ptr(dst, src, order) (void*)c89atomic_exchange_explicit_64((volatile c89atomic_uint64*)dst, (c89atomic_uint64)src, order)
#define c89atomic_compare_exchange_strong_explicit_ptr(dst, expected, desired, successOrder, failureOrder) c89atomic_compare_exchange_strong_explicit_64((volatile c89atomic_uint64*)dst, (volatile c89atomic_uint64*)expected, (c89atomic_uint64)desired, successOrder, failureOrder)
#define c89atomic_compare_exchange_weak_explicit_ptr(dst, expected, desired, successOrder, failureOrder) c89atomic_compare_exchange_weak_explicit_64((volatile c89atomic_uint64*)dst, (volatile c89atomic_uint64*)expected, (c89atomic_uint64)desired, successOrder, failureOrder)
#define c89atomic_compare_and_swap_ptr(dst, expected, desired) (void*)c89atomic_compare_and_swap_64 ((volatile c89atomic_uint64*)dst, (c89atomic_uint64)expected, (c89atomic_uint64)desired)
#elif defined(C89ATOMIC_32BIT)
#define c89atomic_is_lock_free_ptr(ptr) c89atomic_is_lock_free_32((volatile c89atomic_uint32*)ptr)
#define c89atomic_load_explicit_ptr(ptr, order) (void*)c89atomic_load_explicit_32((volatile c89atomic_uint32*)ptr, order)
#define c89atomic_store_explicit_ptr(dst, src, order) (void*)c89atomic_store_explicit_32((volatile c89atomic_uint32*)dst, (c89atomic_uint32)src, order)
#define c89atomic_exchange_explicit_ptr(dst, src, order) (void*)c89atomic_exchange_explicit_32((volatile c89atomic_uint32*)dst, (c89atomic_uint32)src, order)
#define c89atomic_compare_exchange_strong_explicit_ptr(dst, expected, desired, successOrder, failureOrder) c89atomic_compare_exchange_strong_explicit_32((volatile c89atomic_uint32*)dst, (volatile c89atomic_uint32*)expected, (c89atomic_uint32)desired, successOrder, failureOrder)
#define c89atomic_compare_exchange_weak_explicit_ptr(dst, expected, desired, successOrder, failureOrder) c89atomic_compare_exchange_weak_explicit_32((volatile c89atomic_uint32*)dst, (volatile c89atomic_uint32*)expected, (c89atomic_uint32)desired, successOrder, failureOrder)
#define c89atomic_compare_and_swap_ptr(dst, expected, desired) (void*)c89atomic_compare_and_swap_32((volatile c89atomic_uint32*)dst, (c89atomic_uint32)expected, (c89atomic_uint32)desired)
#else
error "Unsupported architecture."
#endif
#define c89atomic_flag_test_and_set(ptr) c89atomic_flag_test_and_set_explicit(ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_flag_clear(ptr) c89atomic_flag_clear_explicit(ptr, c89atomic_memory_order_seq_cst)
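/*
Illustrative sketch (hypothetical helpers, not part of c89atomic.h): c89atomic_flag is sufficient
for a minimal spinlock, assuming acquire semantics on lock and release semantics on unlock.
*/
static C89ATOMIC_INLINE void c89atomic_example_spinlock_lock(volatile c89atomic_flag* pLock)
{
    while (c89atomic_flag_test_and_set_explicit(pLock, c89atomic_memory_order_acquire)) {
        /* Spin until the current holder clears the flag. */
    }
}
static C89ATOMIC_INLINE void c89atomic_example_spinlock_unlock(volatile c89atomic_flag* pLock)
{
    c89atomic_flag_clear_explicit(pLock, c89atomic_memory_order_release);
}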
#define c89atomic_test_and_set_8( ptr) c89atomic_test_and_set_explicit_8 (ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_test_and_set_16(ptr) c89atomic_test_and_set_explicit_16(ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_test_and_set_32(ptr) c89atomic_test_and_set_explicit_32(ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_test_and_set_64(ptr) c89atomic_test_and_set_explicit_64(ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_clear_8( ptr) c89atomic_clear_explicit_8 (ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_clear_16(ptr) c89atomic_clear_explicit_16(ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_clear_32(ptr) c89atomic_clear_explicit_32(ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_clear_64(ptr) c89atomic_clear_explicit_64(ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_store_8( dst, src) c89atomic_store_explicit_8 ( dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_store_16( dst, src) c89atomic_store_explicit_16( dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_store_32( dst, src) c89atomic_store_explicit_32( dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_store_64( dst, src) c89atomic_store_explicit_64( dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_store_ptr(dst, src) c89atomic_store_explicit_ptr(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_load_8( ptr) c89atomic_load_explicit_8 ( ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_load_16( ptr) c89atomic_load_explicit_16( ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_load_32( ptr) c89atomic_load_explicit_32( ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_load_64( ptr) c89atomic_load_explicit_64( ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_load_ptr(ptr) c89atomic_load_explicit_ptr(ptr, c89atomic_memory_order_seq_cst)
#define c89atomic_exchange_8( dst, src) c89atomic_exchange_explicit_8 ( dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_exchange_16( dst, src) c89atomic_exchange_explicit_16( dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_exchange_32( dst, src) c89atomic_exchange_explicit_32( dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_exchange_64( dst, src) c89atomic_exchange_explicit_64( dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_exchange_ptr(dst, src) c89atomic_exchange_explicit_ptr(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_compare_exchange_strong_8( dst, expected, desired) c89atomic_compare_exchange_strong_explicit_8 ( dst, expected, desired, c89atomic_memory_order_seq_cst, c89atomic_memory_order_seq_cst)
#define c89atomic_compare_exchange_strong_16( dst, expected, desired) c89atomic_compare_exchange_strong_explicit_16( dst, expected, desired, c89atomic_memory_order_seq_cst, c89atomic_memory_order_seq_cst)
#define c89atomic_compare_exchange_strong_32( dst, expected, desired) c89atomic_compare_exchange_strong_explicit_32( dst, expected, desired, c89atomic_memory_order_seq_cst, c89atomic_memory_order_seq_cst)
#define c89atomic_compare_exchange_strong_64( dst, expected, desired) c89atomic_compare_exchange_strong_explicit_64( dst, expected, desired, c89atomic_memory_order_seq_cst, c89atomic_memory_order_seq_cst)
#define c89atomic_compare_exchange_strong_ptr(dst, expected, desired) c89atomic_compare_exchange_strong_explicit_ptr(dst, expected, desired, c89atomic_memory_order_seq_cst, c89atomic_memory_order_seq_cst)
#define c89atomic_compare_exchange_weak_8( dst, expected, desired) c89atomic_compare_exchange_weak_explicit_8 ( dst, expected, desired, c89atomic_memory_order_seq_cst, c89atomic_memory_order_seq_cst)
#define c89atomic_compare_exchange_weak_16( dst, expected, desired) c89atomic_compare_exchange_weak_explicit_16( dst, expected, desired, c89atomic_memory_order_seq_cst, c89atomic_memory_order_seq_cst)
#define c89atomic_compare_exchange_weak_32( dst, expected, desired) c89atomic_compare_exchange_weak_explicit_32( dst, expected, desired, c89atomic_memory_order_seq_cst, c89atomic_memory_order_seq_cst)
#define c89atomic_compare_exchange_weak_64( dst, expected, desired) c89atomic_compare_exchange_weak_explicit_64( dst, expected, desired, c89atomic_memory_order_seq_cst, c89atomic_memory_order_seq_cst)
#define c89atomic_compare_exchange_weak_ptr(dst, expected, desired) c89atomic_compare_exchange_weak_explicit_ptr(dst, expected, desired, c89atomic_memory_order_seq_cst, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_add_8( dst, src) c89atomic_fetch_add_explicit_8 (dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_add_16(dst, src) c89atomic_fetch_add_explicit_16(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_add_32(dst, src) c89atomic_fetch_add_explicit_32(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_add_64(dst, src) c89atomic_fetch_add_explicit_64(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_sub_8( dst, src) c89atomic_fetch_sub_explicit_8 (dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_sub_16(dst, src) c89atomic_fetch_sub_explicit_16(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_sub_32(dst, src) c89atomic_fetch_sub_explicit_32(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_sub_64(dst, src) c89atomic_fetch_sub_explicit_64(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_or_8( dst, src) c89atomic_fetch_or_explicit_8 (dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_or_16(dst, src) c89atomic_fetch_or_explicit_16(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_or_32(dst, src) c89atomic_fetch_or_explicit_32(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_or_64(dst, src) c89atomic_fetch_or_explicit_64(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_xor_8( dst, src) c89atomic_fetch_xor_explicit_8 (dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_xor_16(dst, src) c89atomic_fetch_xor_explicit_16(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_xor_32(dst, src) c89atomic_fetch_xor_explicit_32(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_xor_64(dst, src) c89atomic_fetch_xor_explicit_64(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_and_8( dst, src) c89atomic_fetch_and_explicit_8 (dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_and_16(dst, src) c89atomic_fetch_and_explicit_16(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_and_32(dst, src) c89atomic_fetch_and_explicit_32(dst, src, c89atomic_memory_order_seq_cst)
#define c89atomic_fetch_and_64(dst, src) c89atomic_fetch_and_explicit_64(dst, src, c89atomic_memory_order_seq_cst)
#if defined(__cplusplus)
}
#endif
#endif /* c89atomic_h */
/* c89atomic.h end */
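/*
Illustrative sketch (hypothetical, not part of miniaudio): one way the seq_cst convenience
wrappers above can be combined, here as a one-time initialization guard of the kind a future
lock-free API might build upon.
*/
static c89atomic_uint32 g_exampleInitState = 0; /* 0 = uninitialized, 1 = initializing, 2 = initialized. */
static void example_init_once(void (*onInit)(void))
{
    c89atomic_uint32 expected = 0;
    if (c89atomic_compare_exchange_strong_32(&g_exampleInitState, &expected, 1)) {
        onInit();                                   /* We won the race; run the initializer. */
        c89atomic_store_32(&g_exampleInitState, 2); /* Publish completion. */
    } else {
        while (c89atomic_load_32(&g_exampleInitState) != 2) {
            /* Another thread is initializing; spin until it finishes. */
        }
    }
}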
static void* ma__malloc_default(size_t sz, void* pUserData)
@@ -9601,7 +10486,7 @@ static ma_result ma_device__handle_duplex_callback_playback(ma_device* pDevice,
/* A helper for changing the state of the device. */
static MA_INLINE void ma_device__set_state(ma_device* pDevice, ma_uint32 newState)
{
ma_atomic_exchange_32(&pDevice->state, newState);
c89atomic_exchange_32(&pDevice->state, newState);
}
/* A helper for getting the state of the device. */
@@ -9719,7 +10604,7 @@ static ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData)
/* Starting the device needs to put the thread into a loop. */
if (pDevice->null_device.operation == MA_DEVICE_OP_START__NULL) {
ma_atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL);
c89atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL);
/* Reset the timer just in case. */
ma_timer_init(&pDevice->null_device.timer);
@@ -9730,29 +10615,29 @@ static ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData)
}
/* Getting here means a suspend or kill operation has been requested. */
ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS);
c89atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS);
ma_event_signal(&pDevice->null_device.operationCompletionEvent);
continue;
}
/* Suspending the device means we need to stop the timer and just continue the loop. */
if (pDevice->null_device.operation == MA_DEVICE_OP_SUSPEND__NULL) {
ma_atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL);
c89atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL);
/* We need to add the current run time to the prior run time, then reset the timer. */
pDevice->null_device.priorRunTime += ma_timer_get_time_in_seconds(&pDevice->null_device.timer);
ma_timer_init(&pDevice->null_device.timer);
/* We're done. */
ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS);
c89atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS);
ma_event_signal(&pDevice->null_device.operationCompletionEvent);
continue;
}
/* Killing the device means we need to get out of this loop so that this thread can terminate. */
if (pDevice->null_device.operation == MA_DEVICE_OP_KILL__NULL) {
ma_atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL);
ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS);
c89atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL);
c89atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS);
ma_event_signal(&pDevice->null_device.operationCompletionEvent);
break;
}
@@ -9760,7 +10645,7 @@ static ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData)
/* Getting a signal on a "none" operation probably means an error. Return invalid operation. */
if (pDevice->null_device.operation == MA_DEVICE_OP_NONE__NULL) {
MA_ASSERT(MA_FALSE); /* <-- Trigger this in debug mode to ensure developers are aware they're doing something wrong (or there's a bug in miniaudio). */
ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_INVALID_OPERATION);
c89atomic_exchange_32(&pDevice->null_device.operationResult, MA_INVALID_OPERATION);
ma_event_signal(&pDevice->null_device.operationCompletionEvent);
continue; /* Continue the loop. Don't terminate. */
}
@@ -9771,7 +10656,7 @@ static ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData)
static ma_result ma_device_do_operation__null(ma_device* pDevice, ma_uint32 operation)
{
ma_atomic_exchange_32(&pDevice->null_device.operation, operation);
c89atomic_exchange_32(&pDevice->null_device.operation, operation);
if (ma_event_signal(&pDevice->null_device.operationEvent) != MA_SUCCESS) {
return MA_ERROR;
}
@@ -9941,7 +10826,7 @@ static ma_result ma_device_start__null(ma_device* pDevice)
ma_device_do_operation__null(pDevice, MA_DEVICE_OP_START__NULL);
ma_atomic_exchange_32(&pDevice->null_device.isStarted, MA_TRUE);
c89atomic_exchange_32(&pDevice->null_device.isStarted, MA_TRUE);
return MA_SUCCESS;
}
@@ -9951,7 +10836,7 @@ static ma_result ma_device_stop__null(ma_device* pDevice)
ma_device_do_operation__null(pDevice, MA_DEVICE_OP_SUSPEND__NULL);
ma_atomic_exchange_32(&pDevice->null_device.isStarted, MA_FALSE);
c89atomic_exchange_32(&pDevice->null_device.isStarted, MA_FALSE);
return MA_SUCCESS;
}
@@ -11206,12 +12091,12 @@ static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_QueryInterface(ma_IMMN
static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_AddRef(ma_IMMNotificationClient* pThis)
{
return (ULONG)ma_atomic_increment_32(&pThis->counter);
return (ULONG)c89atomic_fetch_add_32(&pThis->counter, 1) + 1;
}
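/* Note: c89atomic_fetch_add_32() and c89atomic_fetch_sub_32() return the value *before* the
   operation, whereas the old ma_atomic_increment_32()/ma_atomic_decrement_32() returned the new
   value, hence the explicit +1 and -1 adjustments in these two functions. */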
static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_Release(ma_IMMNotificationClient* pThis)
{
ma_uint32 newRefCount = ma_atomic_decrement_32(&pThis->counter);
ma_uint32 newRefCount = c89atomic_fetch_sub_32(&pThis->counter, 1) - 1;
if (newRefCount == 0) {
return 0; /* We don't free anything here because we never allocate the object on the heap. */
}
@@ -11295,10 +12180,10 @@ static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged
that properly.
*/
if (dataFlow == ma_eRender && pThis->pDevice->type != ma_device_type_loopback) {
ma_atomic_exchange_32(&pThis->pDevice->wasapi.hasDefaultPlaybackDeviceChanged, MA_TRUE);
c89atomic_exchange_32(&pThis->pDevice->wasapi.hasDefaultPlaybackDeviceChanged, MA_TRUE);
}
if (dataFlow == ma_eCapture || pThis->pDevice->type == ma_device_type_loopback) {
ma_atomic_exchange_32(&pThis->pDevice->wasapi.hasDefaultCaptureDeviceChanged, MA_TRUE);
c89atomic_exchange_32(&pThis->pDevice->wasapi.hasDefaultCaptureDeviceChanged, MA_TRUE);
}
(void)pDefaultDeviceID;
@@ -12720,8 +13605,8 @@ static ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_co
}
#endif
ma_atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_FALSE);
ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_FALSE);
c89atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_FALSE);
c89atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_FALSE);
return MA_SUCCESS;
}
@@ -12785,10 +13670,10 @@ static ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type de
}
if (deviceType == ma_device_type_playback) {
ma_atomic_exchange_32(&pDevice->wasapi.hasDefaultPlaybackDeviceChanged, MA_FALSE);
c89atomic_exchange_32(&pDevice->wasapi.hasDefaultPlaybackDeviceChanged, MA_FALSE);
}
if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) {
ma_atomic_exchange_32(&pDevice->wasapi.hasDefaultCaptureDeviceChanged, MA_FALSE);
c89atomic_exchange_32(&pDevice->wasapi.hasDefaultCaptureDeviceChanged, MA_FALSE);
}
@@ -12857,7 +13742,7 @@ static ma_result ma_device_main_loop__wasapi(ma_device* pDevice)
if (FAILED(hr)) {
return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal capture device.", ma_result_from_HRESULT(hr));
}
ma_atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_TRUE);
c89atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_TRUE);
}
while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) {
@@ -13184,7 +14069,7 @@ static ma_result ma_device_main_loop__wasapi(ma_device* pDevice)
ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device.", ma_result_from_HRESULT(hr));
}
ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_TRUE);
c89atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_TRUE);
}
}
} break;
@@ -13343,7 +14228,7 @@ static ma_result ma_device_main_loop__wasapi(ma_device* pDevice)
exitLoop = MA_TRUE;
break;
}
ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_TRUE);
c89atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_TRUE);
}
}
} break;
@@ -13370,7 +14255,7 @@ static ma_result ma_device_main_loop__wasapi(ma_device* pDevice)
return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal capture device.", ma_result_from_HRESULT(hr));
}
ma_atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_FALSE);
c89atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_FALSE);
}
if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
@@ -13425,7 +14310,7 @@ static ma_result ma_device_main_loop__wasapi(ma_device* pDevice)
return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal playback device.", ma_result_from_HRESULT(hr));
}
ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_FALSE);
c89atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_FALSE);
}
return MA_SUCCESS;
@@ -39540,7 +40425,7 @@ MA_API ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes, void* pBuffer
newReadOffsetLoopFlag ^= 0x80000000;
}
ma_atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetLoopFlag, newReadOffsetInBytes));
c89atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetLoopFlag, newReadOffsetInBytes));
return MA_SUCCESS;
}
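/* The encoded offset appears to pack a 1-bit loop flag into the MSB and the byte offset into the
   low 31 bits (hence the 0x80000000 XOR above), so a read or write cursor can be published with a
   single 32-bit atomic exchange. A sketch of the presumed layout (hypothetical macros, not part of
   miniaudio): */
#define MA_RB_EXAMPLE_LOOP_FLAG(encoded) ((encoded) & 0x80000000)
#define MA_RB_EXAMPLE_OFFSET(encoded)    ((encoded) & 0x7FFFFFFF)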
@@ -39626,7 +40511,7 @@ MA_API ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes, void* pBuffe
newWriteOffsetLoopFlag ^= 0x80000000;
}
ma_atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetLoopFlag, newWriteOffsetInBytes));
c89atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetLoopFlag, newWriteOffsetInBytes));
return MA_SUCCESS;
}
@@ -39671,7 +40556,7 @@ MA_API ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes)
}
}
ma_atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetInBytes, newReadOffsetLoopFlag));
c89atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetInBytes, newReadOffsetLoopFlag));
return MA_SUCCESS;
}
@@ -39716,7 +40601,7 @@ MA_API ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes)
}
}
ma_atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetInBytes, newWriteOffsetLoopFlag));
c89atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetInBytes, newWriteOffsetLoopFlag));
return MA_SUCCESS;
}
......@@ -44989,6 +45874,7 @@ The following miscellaneous changes have also been made.
REVISION HISTORY
================
v0.10.9 - TBD
- The internal atomics library has been replaced with c89atomic.h, which is embedded within this file.
v0.10.8 - 2020-06-22
- Remove dependency on ma_context from mutexes.
......@@ -725,9 +725,9 @@ MA_API ma_result ma_slot_allocator_alloc(ma_slot_allocator* pAllocator, ma_uint3
newBitfield = oldBitfield | (1 << bitOffset);
if ((ma_uint32)ma_compare_and_swap_32(&pAllocator->groups[iGroup].bitfield, newBitfield, oldBitfield) == oldBitfield) {
if ((ma_uint32)c89atomic_compare_and_swap_32(&pAllocator->groups[iGroup].bitfield, oldBitfield, newBitfield) == oldBitfield) {
*pSlot = iGroup*32 + bitOffset;
ma_atomic_increment_32(&pAllocator->counter);
c89atomic_fetch_add_32(&pAllocator->counter, 1);
return MA_SUCCESS;
}
}
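/*
Illustrative sketch of the argument-order change visible in the pairs above: the old
ma_compare_and_swap_32(dst, exchange, comparand) followed InterlockedCompareExchange's
convention, while c89atomic_compare_and_swap_32 takes (dst, expected, desired). Both return
the value observed before the swap, so success is still detected by comparing the return
value against the old/expected snapshot. The helper name is hypothetical.
*/
static ma_bool32 ma_example_try_set_bit(volatile ma_uint32* pBitfield, ma_uint32 bitOffset)
{
    ma_uint32 oldBitfield = *pBitfield;                      /* snapshot */
    ma_uint32 newBitfield = oldBitfield | (1u << bitOffset); /* desired */
    return c89atomic_compare_and_swap_32(pBitfield, oldBitfield, newBitfield) == oldBitfield;
}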
......@@ -823,8 +823,8 @@ MA_API ma_result ma_slot_allocator_free(ma_slot_allocator* pAllocator, ma_uint32
oldBitfield = pAllocator->groups[iGroup].bitfield;
newBitfield = oldBitfield & ~(1 << iBit);
if ((ma_uint32)ma_compare_and_swap_32(&pAllocator->groups[iGroup].bitfield, newBitfield, oldBitfield) == oldBitfield) {
ma_atomic_decrement_32(&pAllocator->counter);
if ((ma_uint32)c89atomic_compare_and_swap_32(&pAllocator->groups[iGroup].bitfield, oldBitfield, newBitfield) == oldBitfield) {
c89atomic_fetch_sub_32(&pAllocator->counter, 1);
return MA_SUCCESS;
}
#else
......@@ -837,8 +837,8 @@ MA_API ma_result ma_slot_allocator_free(ma_slot_allocator* pAllocator, ma_uint32
/* Before releasing the group we need to ensure the write operation above has completed so we'll throw a memory barrier in here for safety. */
ma_memory_barrier();
ma_atomic_decrement_32(&pAllocator->counter); /* Decrementing the counter should happen before releasing the group's ref count to ensure we don't waste loop iterations in out-of-memory scenarios. */
ma_atomic_decrement_32(&pAllocator->groups[iGroup].refcount); /* Release the hold as soon as possible to allow other things to access the bitfield. */
c89atomic_fetch_sub_32(&pAllocator->counter, 1); /* Decrementing the counter should happen before releasing the group's ref count to ensure we don't waste loop iterations in out-of-memory scenarios. */
c89atomic_fetch_sub_32(&pAllocator->groups[iGroup].refcount, 1); /* Release the hold as soon as possible to allow other things to access the bitfield. */
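/*
Sketch of the ordering this sequence relies on (an assumption drawn from the comments
above): the bitfield write must be visible to other threads before the group's refcount
hold is dropped, otherwise another thread could acquire the group and read a stale
bitfield. A release fence expresses the same guarantee in c89atomic terms. The helper is
hypothetical.
*/
static void ma_example_release_group(volatile ma_uint32* pRefCount)
{
    c89atomic_thread_fence(c89atomic_memory_order_release); /* prior writes first... */
    c89atomic_fetch_sub_32(pRefCount, 1);                   /* ...then drop the hold */
}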
return MA_SUCCESS;
} else {
......@@ -847,7 +847,7 @@ MA_API ma_result ma_slot_allocator_free(ma_slot_allocator* pAllocator, ma_uint32
}
/* Getting here means something is holding our lock. We need to release and spin. */
ma_atomic_decrement_32(&pAllocator->groups[iGroup].refcount);
c89atomic_fetch_sub_32(&pAllocator->groups[iGroup].refcount, 1);
ma_yield();
#endif
}
......@@ -950,15 +950,15 @@ MA_API ma_result ma_job_queue_post(ma_job_queue* pQueue, const ma_job* pJob)
if (tail == pQueue->tail) {
if (next == 0xFFFF) {
if (ma_compare_and_swap_16(&pQueue->jobs[tail].next, slot, next) == next) {
if (c89atomic_compare_and_swap_16(&pQueue->jobs[tail].next, next, slot) == next) {
break;
}
} else {
ma_compare_and_swap_16(&pQueue->tail, next, tail);
c89atomic_compare_and_swap_16(&pQueue->tail, tail, next);
}
}
}
ma_compare_and_swap_16(&pQueue->tail, slot, tail);
c89atomic_compare_and_swap_16(&pQueue->tail, tail, slot);
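/*
Sketch of the enqueue protocol the CASes above implement, assuming it follows the classic
Michael/Scott queue with 0xFFFF standing in for a NULL next index: try to link the new slot
into the current tail's next pointer; if another producer got there first, help it along by
advancing the tail and retry; finally swing the tail to the new slot. The condensed,
hypothetical loop below mirrors the shape of the code above and assumes jobs[slot].next was
set to 0xFFFF before enqueueing.
*/
typedef struct { volatile ma_uint16 next; } ma_example_job;

static void ma_example_enqueue(volatile ma_uint16* pTail, ma_example_job* jobs, ma_uint16 slot)
{
    ma_uint16 tail;
    ma_uint16 next;
    for (;;) {
        tail = *pTail;
        next = jobs[tail].next;
        if (tail != *pTail) {
            continue;                                         /* tail moved under us; re-read */
        }
        if (next == 0xFFFF) {                                 /* tail really is the last node */
            if (c89atomic_compare_and_swap_16(&jobs[tail].next, next, slot) == next) {
                break;                                        /* linked at the end */
            }
        } else {
            c89atomic_compare_and_swap_16(pTail, tail, next); /* help a stalled producer */
        }
    }
    c89atomic_compare_and_swap_16(pTail, tail, slot);         /* swing tail; losing is fine */
}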
/* Signal the semaphore as the last step if we're using synchronous mode. */
......@@ -995,10 +995,10 @@ MA_API ma_result ma_job_queue_next(ma_job_queue* pQueue, ma_job* pJob)
if (next == 0xFFFF) {
return MA_NO_DATA_AVAILABLE;
}
ma_compare_and_swap_16(&pQueue->tail, next, tail);
c89atomic_compare_and_swap_16(&pQueue->tail, tail, next);
} else {
*pJob = pQueue->jobs[next];
if (ma_compare_and_swap_16(&pQueue->head, next, head) == head) {
if (c89atomic_compare_and_swap_16(&pQueue->head, head, next) == head) {
break;
}
}
......@@ -1100,7 +1100,7 @@ static ma_uint32 ma_hash_32(const void* key, int len, ma_uint32 seed)
static ma_uint32 ma_hash_string_32(const char* str)
{
return ma_hash_32(str, strlen(str), MA_DEFAULT_HASH_SEED);
return ma_hash_32(str, (int)strlen(str), MA_DEFAULT_HASH_SEED);
}
......@@ -1217,7 +1217,7 @@ static ma_result ma_resource_manager_message_queue_post_nolock(ma_resource_manag
putLoopFlag ^= 0x80000000;
}
ma_atomic_exchange_32(&pQueue->putCursor, ma_rb__construct_offset(putIndex, putLoopFlag));
c89atomic_exchange_32(&pQueue->putCursor, ma_rb__construct_offset(putIndex, putLoopFlag));
/* Now that the message is in the queue we can let the consumer thread know about it by releasing the semaphore. */
ma_semaphore_release(&pQueue->sem);
......@@ -1279,7 +1279,7 @@ MA_API ma_result ma_resource_manager_message_queue_next(ma_resource_manager_mess
getLoopFlag ^= 0x80000000;
}
ma_atomic_exchange_32(&pQueue->getCursor, ma_rb__construct_offset(getIndex, getLoopFlag));
c89atomic_exchange_32(&pQueue->getCursor, ma_rb__construct_offset(getIndex, getLoopFlag));
return MA_SUCCESS;
}
......@@ -1612,7 +1612,7 @@ static ma_result ma_resource_manager_data_buffer_increment_ref(ma_resource_manag
(void)pResourceManager;
refCount = ma_atomic_increment_32(&pDataBuffer->refCount);
refCount = c89atomic_fetch_add_32(&pDataBuffer->refCount, 1) + 1;
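/*
Why the "+ 1" above (sketch): the old ma_atomic_increment_32() returned the post-increment
value, whereas c89atomic_fetch_add_32() returns the value held before the add, so the
caller re-adds 1 to keep returning the new reference count. The helper name is
hypothetical.
*/
static ma_uint32 ma_example_ref(volatile ma_uint32* pRefCount)
{
    return c89atomic_fetch_add_32(pRefCount, 1) + 1; /* old value + 1 == new value */
}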
if (pNewRefCount != NULL) {
*pNewRefCount = refCount;
......@@ -1630,7 +1630,7 @@ static ma_result ma_resource_manager_data_buffer_decrement_ref(ma_resource_manag
(void)pResourceManager;
refCount = ma_atomic_decrement_32(&pDataBuffer->refCount);
refCount = c89atomic_fetch_sub_32(&pDataBuffer->refCount, 1) - 1;
if (pNewRefCount != NULL) {
*pNewRefCount = refCount;
......@@ -1951,7 +1951,7 @@ static ma_result ma_resource_manager_delete_data_buffer_nolock(ma_resource_manag
The data buffer has been removed from the BST, so now we need to delete the underlying data. This needs to be done in a separate thread. We don't
want to delete anything if the data is owned by the application. Also, just to be safe, we set the result to MA_UNAVAILABLE.
*/
ma_atomic_exchange_32(&pDataBuffer->result, MA_UNAVAILABLE);
c89atomic_exchange_32(&pDataBuffer->result, MA_UNAVAILABLE);
/* Don't delete any underlying data if it's not owned by the resource manager. */
if (pDataBuffer->isDataOwnedByResourceManager) {
......@@ -2130,7 +2130,7 @@ MA_API ma_result ma_resource_manager_delete_data_stream(ma_resource_manager* pRe
}
/* The first thing to do is set the result to unavailable. This will prevent future page decoding. */
ma_atomic_exchange_32(&pDataStream->result, MA_UNAVAILABLE);
c89atomic_exchange_32(&pDataStream->result, MA_UNAVAILABLE);
/*
We need to post a message to ensure we're not in the middle of decoding or anything. Because the object is owned by the caller, we'll need
......@@ -2165,7 +2165,7 @@ MA_API ma_result ma_resource_manager_data_stream_set_looping(ma_resource_manager
return MA_INVALID_ARGS;
}
ma_atomic_exchange_32(&pDataStream->isLooping, isLooping);
c89atomic_exchange_32(&pDataStream->isLooping, isLooping);
return MA_SUCCESS;
}
......@@ -2268,7 +2268,7 @@ MA_API ma_result ma_resource_manager_data_stream_seek_to_pcm_frame(ma_resource_m
}
/* Increment the seek counter first to indicate to read_paged_pcm_frames() and map_paged_pcm_frames() that we are in the middle of a seek and MA_BUSY should be returned. */
ma_atomic_increment_32(&pDataStream->seekCounter);
c89atomic_fetch_add_32(&pDataStream->seekCounter, 1);
/*
We need to clear our currently loaded pages so that the stream starts playback from the new seek point as soon as possible. These are for the purpose of the public
......@@ -2277,8 +2277,8 @@ MA_API ma_result ma_resource_manager_data_stream_seek_to_pcm_frame(ma_resource_m
*/
pDataStream->relativeCursor = 0;
pDataStream->currentPageIndex = 0;
ma_atomic_exchange_32(&pDataStream->isPageValid[0], MA_FALSE);
ma_atomic_exchange_32(&pDataStream->isPageValid[1], MA_FALSE);
c89atomic_exchange_32(&pDataStream->isPageValid[0], MA_FALSE);
c89atomic_exchange_32(&pDataStream->isPageValid[1], MA_FALSE);
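/*
Sketch of the guard the seek counter provides (inferred from the comment above): readers
check it before touching the pages and report MA_BUSY while a seek is in flight, and the
async thread decrements it once the pages have been refilled. The volatile read below is a
stand-in for whatever load the real read path performs; the helper is hypothetical.
*/
static ma_result ma_example_read_guard(const volatile ma_uint32* pSeekCounter)
{
    if (*pSeekCounter > 0) {
        return MA_BUSY; /* seek in progress; pages are about to be replaced */
    }
    return MA_SUCCESS;
}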
/*
The public API is not allowed to touch the internal decoder so we need to use a message to perform the seek. When seeking, the async thread will assume both pages
......@@ -2384,7 +2384,7 @@ MA_API ma_result ma_resource_manager_data_stream_unmap_paged_pcm_frames(ma_resou
message.decodeStreamPage.pageIndex = pDataStream->currentPageIndex;
/* The page needs to be marked as invalid so that the public API doesn't try reading from it. */
ma_atomic_exchange_32(&pDataStream->isPageValid[pDataStream->currentPageIndex], MA_FALSE);
c89atomic_exchange_32(&pDataStream->isPageValid[pDataStream->currentPageIndex], MA_FALSE);
/* Before sending the message we need to make sure we set some state. */
pDataStream->relativeCursor = newRelativeCursor;
......@@ -2731,7 +2731,7 @@ static ma_result ma_resource_manager_data_source_set_result_and_signal(ma_resour
MA_ASSERT(pDataSource != NULL);
/* If the data source's status is anything other than MA_BUSY it means it is being deleted or an error occurred. We don't ever want to move away from that state. */
ma_compare_and_swap_32(&pDataSource->result, result, MA_BUSY);
c89atomic_compare_and_swap_32(&pDataSource->result, MA_BUSY, result);
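/*
Sketch of the "only move away from MA_BUSY" rule the CAS above enforces: the new result is
stored only if the current value is still MA_BUSY, so a terminal state such as
MA_UNAVAILABLE set by a concurrent delete can never be overwritten. The helper is
hypothetical.
*/
static void ma_example_set_result(volatile ma_result* pResult, ma_result result)
{
    c89atomic_compare_and_swap_32((volatile ma_uint32*)pResult, (ma_uint32)MA_BUSY, (ma_uint32)result);
}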
/* If we have an event we want to signal it after setting the data source's status. */
if (pEvent != NULL) {
......@@ -2923,7 +2923,7 @@ static ma_result ma_resource_manager_data_source_init_buffer(ma_resource_manager
return result;
}
ma_atomic_exchange_32(&pDataSource->result, MA_SUCCESS);
c89atomic_exchange_32(&pDataSource->result, MA_SUCCESS);
return MA_SUCCESS;
} else {
/* Some other error has occurred with the data buffer. Let's abandon everything and return the data buffer's result. */
......@@ -2999,7 +2999,7 @@ MA_API ma_result ma_resource_manager_data_source_uninit(ma_resource_manager* pRe
}
/* The first thing to do is to mark the data source as unavailable. This will stop other threads from acquiring a hold on the data source which is what happens in the callbacks. */
ma_atomic_exchange_32(&pDataSource->result, MA_UNAVAILABLE);
c89atomic_exchange_32(&pDataSource->result, MA_UNAVAILABLE);
if ((pDataSource->flags & MA_DATA_SOURCE_FLAG_STREAM) != 0) {
return ma_resource_manager_data_source_uninit_stream(pResourceManager, pDataSource);
......@@ -3030,7 +3030,7 @@ MA_API ma_result ma_resource_manager_data_source_set_looping(ma_resource_manager
if ((pDataSource->flags & MA_DATA_SOURCE_FLAG_STREAM) != 0) {
return ma_resource_manager_data_stream_set_looping(pResourceManager, &pDataSource->dataStream.stream, isLooping);
} else {
ma_atomic_exchange_32(&pDataSource->dataBuffer.isLooping, isLooping);
c89atomic_exchange_32(&pDataSource->dataBuffer.isLooping, isLooping);
return MA_SUCCESS;
}
}
......@@ -3164,7 +3164,7 @@ static ma_result ma_resource_manager_handle_message__load_data_buffer(ma_resourc
is set *before* setting the number of available frames. This way, the other thread need only check if decodedFrameCount > 0, in
which case it can assume pData and frameCount are valid.
*/
ma_memory_barrier();
c89atomic_thread_fence(c89atomic_memory_order_acquire);
pDataBuffer->data.decoded.decodedFrameCount = framesRead;
ma__free_from_callbacks(pDecoder, &pResourceManager->config.allocationCallbacks/*, MA_ALLOCATION_TYPE_DECODER*/);
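/*
Sketch of the publish/consume pairing this fence participates in (an assumption based on
the comment above: the writer fills the data, fences, then stores decodedFrameCount, while
readers load decodedFrameCount first and then read the data). Conventionally the writer
side uses a release fence and the reader an acquire fence, so a non-zero frame count
guarantees the data writes are visible. The helpers below are hypothetical.
*/
static void ma_example_publish(volatile ma_uint64* pCount, ma_uint64 framesRead)
{
    /* ...decoded frames written to the shared buffer here... */
    c89atomic_thread_fence(c89atomic_memory_order_release); /* data before count */
    *pCount = framesRead;
}

static int ma_example_consume(const volatile ma_uint64* pCount)
{
    if (*pCount > 0) {
        c89atomic_thread_fence(c89atomic_memory_order_acquire); /* count before data */
        return 1; /* safe to read the decoded data */
    }
    return 0;
}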
......@@ -3191,7 +3191,7 @@ static ma_result ma_resource_manager_handle_message__load_data_buffer(ma_resourc
is set *before* setting the number of available frames. This way, the other thread need only check if decodedFrameCount > 0, in
which case it can assume pData and frameCount are valid.
*/
ma_memory_barrier();
c89atomic_thread_fence(c89atomic_memory_order_acquire);
pDataBuffer->data.decoded.decodedFrameCount = framesRead;
} else {
decodeBufferPageMessage.decodeBufferPage.isUnknownLength = MA_TRUE;
......@@ -3229,7 +3229,7 @@ done:
immediately deletes it before we've got to this point. In this case, pDataBuffer->result will be MA_UNAVAILABLE, and setting it to MA_SUCCESS or any
error code would make the buffer look like it's in a state that it's not.
*/
ma_compare_and_swap_32(&pDataBuffer->result, result, MA_BUSY);
c89atomic_compare_and_swap_32(&pDataBuffer->result, MA_BUSY, result);
/* Only signal the other threads after the result has been set, just for cleanliness' sake. */
if (pEvent != NULL) {
......@@ -3282,11 +3282,11 @@ static void ma_resource_manager_data_stream_fill_page(ma_resource_manager* pReso
}
if (totalFramesReadForThisPage < pageSizeInFrames) {
ma_atomic_exchange_32(&pDataStream->isDecoderAtEnd, MA_TRUE);
c89atomic_exchange_32(&pDataStream->isDecoderAtEnd, MA_TRUE);
}
ma_atomic_exchange_32(&pDataStream->pageFrameCount[pageIndex], (ma_uint32)totalFramesReadForThisPage);
ma_atomic_exchange_32(&pDataStream->isPageValid[pageIndex], MA_TRUE);
c89atomic_exchange_32(&pDataStream->pageFrameCount[pageIndex], (ma_uint32)totalFramesReadForThisPage);
c89atomic_exchange_32(&pDataStream->isPageValid[pageIndex], MA_TRUE);
}
static void ma_resource_manager_data_stream_fill_pages(ma_resource_manager* pResourceManager, ma_resource_manager_data_stream* pDataStream)
......@@ -3353,7 +3353,7 @@ done:
ma__free_from_callbacks(pFilePath, &pResourceManager->config.allocationCallbacks/*, MA_ALLOCATION_TYPE_TRANSIENT_STRING*/);
/* We can only change the status away from MA_BUSY. If it's set to anything else it means an error has occurred somewhere or the uninitialization process has started (most likely). */
ma_compare_and_swap_32(&pDataStream->result, result, MA_BUSY);
c89atomic_compare_and_swap_32(&pDataStream->result, MA_BUSY, result);
/* Only signal the other threads after the result has been set, just for cleanliness' sake. */
if (pEvent != NULL) {
......@@ -3576,7 +3576,7 @@ static ma_result ma_resource_manager_handle_message__decode_buffer_page(ma_resou
is set *before* setting the number of available frames. This way, the other thread need only check if decodedFrameCount > 0, in
which case it can assume pData and frameCount are valid.
*/
ma_memory_barrier();
c89atomic_thread_fence(c89atomic_memory_order_seq_cst);
messageCopy.decodeBufferPage.pDataBuffer->data.decoded.decodedFrameCount = messageCopy.decodeBufferPage.decodedFrameCount;
......@@ -3586,7 +3586,7 @@ static ma_result ma_resource_manager_handle_message__decode_buffer_page(ma_resou
}
/* We need to set the status of the page so other things can know about it. We can only change the status away from MA_BUSY. If it's anything else it cannot be changed. */
ma_compare_and_swap_32(&messageCopy.decodeBufferPage.pDataBuffer->result, result, MA_BUSY);
c89atomic_compare_and_swap_32(&messageCopy.decodeBufferPage.pDataBuffer->result, MA_BUSY, result);
/* We need to signal an event to indicate that we're done. */
if (messageCopy.decodeBufferPage.pCompletedEvent != NULL) {
......@@ -3637,7 +3637,7 @@ static ma_result ma_resource_manager_handle_message__seek_data_stream(ma_resourc
ma_resource_manager_data_stream_fill_pages(pResourceManager, pDataStream);
/* We need to let the public API know that we're done seeking. */
ma_atomic_decrement_32(&pDataStream->seekCounter);
c89atomic_fetch_sub_32(&pDataStream->seekCounter, 1);
return MA_SUCCESS;
}
......@@ -4072,7 +4072,7 @@ static void ma_engine_mix_sound(ma_engine* pEngine, ma_sound_group* pGroup, ma_s
MA_ASSERT(pGroup != NULL);
MA_ASSERT(pSound != NULL);
ma_atomic_exchange_32(&pSound->isMixing, MA_TRUE); /* This must be done before checking the isPlaying state. */
c89atomic_exchange_32(&pSound->isMixing, MA_TRUE); /* This must be done before checking the isPlaying state. */
{
if (pSound->isPlaying) {
ma_result result = MA_SUCCESS;
......@@ -4096,12 +4096,12 @@ static void ma_engine_mix_sound(ma_engine* pEngine, ma_sound_group* pGroup, ma_s
/* If we reached the end of the sound we'll want to mark it as at the end and not playing. */
if (result == MA_AT_END) {
ma_atomic_exchange_32(&pSound->isPlaying, MA_FALSE);
ma_atomic_exchange_32(&pSound->atEnd, MA_TRUE); /* Set to false in ma_engine_sound_start(). */
c89atomic_exchange_32(&pSound->isPlaying, MA_FALSE);
c89atomic_exchange_32(&pSound->atEnd, MA_TRUE); /* Set to false in ma_engine_sound_start(). */
}
}
}
ma_atomic_exchange_32(&pSound->isMixing, MA_FALSE);
c89atomic_exchange_32(&pSound->isMixing, MA_FALSE);
}
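/*
Sketch of the handshake the isMixing flag forms (an assumption: a stop/uninit path
elsewhere clears isPlaying and then waits for the mixer to leave): because the mixer raises
isMixing before it reads isPlaying, a thread that observes isMixing == MA_FALSE after
clearing isPlaying knows the mixer will not touch the sound again until restarted. The
helper is hypothetical.
*/
static void ma_example_wait_for_mixer(const volatile ma_uint32* pIsMixing)
{
    while (*pIsMixing == MA_TRUE) {
        ma_yield(); /* spin; the mix callback holds the flag only briefly */
    }
}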
static void ma_engine_mix_sound_group(ma_engine* pEngine, ma_sound_group* pGroup, void* pFramesOut, ma_uint32 frameCount)
......@@ -4474,18 +4474,18 @@ static ma_result ma_engine_sound_detach(ma_engine* pEngine, ma_sound* pSound)
/* The sound is the head of the list. All we need to do is change the pPrevSoundInGroup member of the next sound to NULL and make it the new head. */
/* Make a new head. */
ma_atomic_exchange_ptr(&pGroup->pFirstSoundInGroup, pSound->pNextSoundInGroup);
c89atomic_exchange_ptr(&pGroup->pFirstSoundInGroup, pSound->pNextSoundInGroup);
} else {
/*
The sound is not the head. We need to remove the sound from the group by simply changing the pNextSoundInGroup member of the previous sound. This is
the important part. This is the part that allows the mixing thread to continue iteration without locking.
*/
ma_atomic_exchange_ptr(&pSound->pPrevSoundInGroup->pNextSoundInGroup, pSound->pNextSoundInGroup);
c89atomic_exchange_ptr(&pSound->pPrevSoundInGroup->pNextSoundInGroup, pSound->pNextSoundInGroup);
}
/* This doesn't really need to be done atomically because we've wrapped this in a lock and it's not used by the mixing thread. */
if (pSound->pNextSoundInGroup != NULL) {
ma_atomic_exchange_ptr(&pSound->pNextSoundInGroup->pPrevSoundInGroup, pSound->pPrevSoundInGroup);
c89atomic_exchange_ptr(&pSound->pNextSoundInGroup->pPrevSoundInGroup, pSound->pPrevSoundInGroup);
}
}
ma_mutex_unlock(&pGroup->lock);
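/*
Sketch of why a single atomic pointer exchange suffices for detach (an assumption drawn
from the comments above: the mixing thread only walks pNextSoundInGroup forward and never
follows pPrevSoundInGroup): swapping the previous node's next pointer unlinks the sound in
one step, so a concurrent iterator observes either the old chain or the new chain, never a
half-updated one. The helper mirrors the exchange in the diff and is otherwise
hypothetical.
*/
static void ma_example_unlink(ma_sound* pPrev, ma_sound* pSound)
{
    c89atomic_exchange_ptr(&pPrev->pNextSoundInGroup, pSound->pNextSoundInGroup);
}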
......@@ -4523,7 +4523,7 @@ static ma_result ma_engine_sound_attach(ma_engine* pEngine, ma_sound* pSound, ma
pOldFirstSoundInGroup->pPrevSoundInGroup = pNewFirstSoundInGroup;
}
ma_atomic_exchange_ptr(&pGroup->pFirstSoundInGroup, pNewFirstSoundInGroup);
c89atomic_exchange_ptr(&pGroup->pFirstSoundInGroup, pNewFirstSoundInGroup);
}
ma_mutex_unlock(&pGroup->lock);
......@@ -4984,7 +4984,7 @@ MA_API ma_result ma_engine_sound_start(ma_engine* pEngine, ma_sound* pSound)
}
/* Once everything is set up we can tell the mixer thread about it. */
ma_atomic_exchange_32(&pSound->isPlaying, MA_TRUE);
c89atomic_exchange_32(&pSound->isPlaying, MA_TRUE);
return MA_SUCCESS;
}
......@@ -4995,7 +4995,7 @@ MA_API ma_result ma_engine_sound_stop(ma_engine* pEngine, ma_sound* pSound)
return MA_INVALID_ARGS;
}
ma_atomic_exchange_32(&pSound->isPlaying, MA_FALSE);
c89atomic_exchange_32(&pSound->isPlaying, MA_FALSE);
return MA_SUCCESS;
}
......@@ -5075,7 +5075,7 @@ MA_API ma_result ma_engine_sound_set_looping(ma_engine* pEngine, ma_sound* pSoun
return MA_INVALID_ARGS;
}
ma_atomic_exchange_32(&pSound->isLooping, isLooping);
c89atomic_exchange_32(&pSound->isLooping, isLooping);
/*
This is a little bit of a hack, but basically we need to set the looping flag at the data source level if we are running a data source managed by
......@@ -5131,7 +5131,7 @@ MA_API ma_result ma_engine_play_sound(ma_engine* pEngine, const char* pFilePath,
We need to check the atEnd flag to determine whether this sound is available. The problem is that another thread might be trying to acquire this
sound at the same time. We want to avoid as much locking as possible, so we'll do this as a compare-and-swap.
*/
if (ma_compare_and_swap_32(&pNextSound->atEnd, MA_FALSE, MA_TRUE) == MA_TRUE) {
if (c89atomic_compare_and_swap_32(&pNextSound->atEnd, MA_TRUE, MA_FALSE) == MA_TRUE) {
/* We got it. */
pSound = pNextSound;
break;
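/*
Sketch of the claim race the CAS above settles: several callers can race for the same
recyclable sound, but only the one whose compare-and-swap observes atEnd == MA_TRUE (and
flips it to MA_FALSE) wins ownership; the rest see a different return value and move on to
the next sound. The helper name is hypothetical.
*/
static ma_bool32 ma_example_try_claim(volatile ma_uint32* pAtEnd)
{
    return c89atomic_compare_and_swap_32(pAtEnd, MA_TRUE, MA_FALSE) == MA_TRUE;
}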
......@@ -5160,7 +5160,7 @@ MA_API ma_result ma_engine_play_sound(ma_engine* pEngine, const char* pFilePath,
result = ma_resource_manager_data_source_init(pEngine->pResourceManager, pFilePath, dataSourceFlags, &pSound->resourceManagerDataSource);
if (result != MA_SUCCESS) {
/* We failed to load the resource. We need to return an error. We must also put this sound back up for recycling by setting the at-end flag to true. */
ma_atomic_exchange_32(&pSound->atEnd, MA_TRUE); /* <-- Put the sound back up for recycling. */
c89atomic_exchange_32(&pSound->atEnd, MA_TRUE); /* <-- Put the sound back up for recycling. */
return result;
}
......@@ -5251,10 +5251,10 @@ static ma_result ma_engine_sound_group_detach(ma_engine* pEngine, ma_sound_group
MA_ASSERT(pGroup->pParent != NULL);
MA_ASSERT(pGroup->pParent->pFirstChild == pGroup);
ma_atomic_exchange_ptr(&pGroup->pParent->pFirstChild, pGroup->pNextSibling);
c89atomic_exchange_ptr(&pGroup->pParent->pFirstChild, pGroup->pNextSibling);
} else {
/* It's not the first child in the parent group. */
ma_atomic_exchange_ptr(&pGroup->pPrevSibling->pNextSibling, pGroup->pNextSibling);
c89atomic_exchange_ptr(&pGroup->pPrevSibling->pNextSibling, pGroup->pNextSibling);
}
/* The previous sibling needs to be changed for the old next sibling. */
......@@ -5370,7 +5370,7 @@ MA_API ma_result ma_engine_sound_group_start(ma_engine* pEngine, ma_sound_group*
pGroup = &pEngine->masterSoundGroup;
}
ma_atomic_exchange_32(&pGroup->isPlaying, MA_TRUE);
c89atomic_exchange_32(&pGroup->isPlaying, MA_TRUE);
return MA_SUCCESS;
}
......@@ -5385,7 +5385,7 @@ MA_API ma_result ma_engine_sound_group_stop(ma_engine* pEngine, ma_sound_group*
pGroup = &pEngine->masterSoundGroup;
}
ma_atomic_exchange_32(&pGroup->isPlaying, MA_FALSE);
c89atomic_exchange_32(&pGroup->isPlaying, MA_FALSE);
return MA_SUCCESS;
}
......