Add atomics for 64-bit integers

The public API looks the same but with added overloads.
The implementation uses templates or overloads depending on the context
to avoid code duplication.
This commit is contained in:
Pedro J. Estébanez 2017-06-22 05:05:59 +02:00
parent 98a9c8fc5f
commit 9a868cd846
2 changed files with 100 additions and 37 deletions

View File

@@ -33,7 +3,10 @@
#ifdef NO_THREADS
uint32_t atomic_conditional_increment(register uint32_t *pw) {
/* Bogus implementation unaware of multiprocessing */
template <class T>
static _ALWAYS_INLINE_ T _atomic_conditional_increment_impl(register T *pw) {
	// Bogus single-threaded fallback: increment *pw unless it is zero.
	// Returns the new value, or 0 to signal the counter was already zero.
	if (*pw == 0)
		return 0;
	// Restored the increment step that the rendered diff hunk cut out; without
	// it the function returned *pw unmodified on the nonzero path.
	(*pw)++;
	return *pw;
}
uint32_t atomic_increment(register uint32_t *pw) {
	/* Single-threaded fallback: plain pre-increment, handing back the new value. */
	return ++(*pw);
}
uint32_t atomic_decrement(register uint32_t *pw) {
template <class T>
static _ALWAYS_INLINE_ T _atomic_decrement_impl(register T *pw) {
	/* Bogus single-threaded fallback: pre-decrement and hand back the result. */
	return --(*pw);
}
#else
template <class T>
static _ALWAYS_INLINE_T _atomic_increment_impl(register T *pw) {
#ifdef _MSC_VER
(*pw)++;
// don't pollute my namespace!
#include <windows.h>
uint32_t atomic_conditional_increment(register uint32_t *pw) {
	// Increment *pw unless it is currently zero. Returns the incremented
	// value, or 0 to signal that the counter had already reached zero.
	// NOTE(review): removed an unreachable trailing `return *pw;` — the
	// while (true) loop below only exits through its own return statements.
	/* try to increment until it actually works */
	// taken from boost
	while (true) {
		uint32_t tmp = static_cast<uint32_t const volatile &>(*pw);
		if (tmp == 0)
			return 0; // if zero, can't add to it anymore
		if (InterlockedCompareExchange((LONG volatile *)pw, tmp + 1, tmp) == tmp)
			return tmp + 1;
	}
}
uint32_t atomic_decrement(register uint32_t *pw) {
	// Delegate to the Win32 interlocked primitive; yields the decremented value.
	LONG volatile *target = (LONG volatile *)pw;
	return (uint32_t)InterlockedDecrement(target);
}
uint32_t atomic_increment(register uint32_t *pw) {
	// Delegate to the Win32 interlocked primitive; yields the incremented value.
	LONG volatile *target = (LONG volatile *)pw;
	return (uint32_t)InterlockedIncrement(target);
}
#elif defined(__GNUC__)
uint32_t atomic_conditional_increment(register uint32_t *pw) {
/* Implementation for GCC & Clang */
// GCC guarantees atomic intrinsics for sizes of 1, 2, 4 and 8 bytes.
// Clang states it supports GCC atomic builtins.
template <class T>
static _ALWAYS_INLINE_ T _atomic_conditional_increment_impl(register T *pw) {
	// Increment *pw unless it is currently zero; returns the new value,
	// or 0 to signal the counter was already zero.
	// Fixed two diff-interleave artifacts: a duplicate conflicting
	// `uint32_t tmp` declaration next to the templated `T tmp`, and the
	// missing success-path return that the hunk boundary cut off.
	while (true) {
		T tmp = static_cast<T const volatile &>(*pw);
		if (tmp == 0)
			return 0; // if zero, can't add to it anymore
		if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp)
			return tmp + 1; // CAS succeeded, report the incremented value
	}
}
uint32_t atomic_decrement(register uint32_t *pw) {
template <class T>
static _ALWAYS_INLINE_ T _atomic_decrement_impl(register T *pw) {
	// Atomically subtract one via the GCC/Clang builtin; returns the new value.
	T decremented = __sync_sub_and_fetch(pw, 1);
	return decremented;
}
uint32_t atomic_increment(register uint32_t *pw) {
template <class T>
static _ALWAYS_INLINE_ T _atomic_increment_impl(register T *pw) {
	// Atomically add one via the GCC/Clang builtin; returns the new value.
	T incremented = __sync_add_and_fetch(pw, 1);
	return incremented;
}
#elif defined(_MSC_VER)
/* Implementation for MSVC-Windows */
// don't pollute my namespace!
#include <windows.h>
// Generates the compare-and-swap loop shared by the 32- and 64-bit MSVC
// conditional-increment wrappers below. m_win_cmpxchg is the Interlocked
// compare-exchange function matching m_win_type; m_cpp_type is the C++
// integer type of the counter. The expanded code returns 0 when the counter
// is zero, otherwise retries the CAS until it lands and returns the new value.
#define ATOMIC_CONDITIONAL_INCREMENT_BODY(m_pw, m_win_type, m_win_cmpxchg, m_cpp_type) \
/* try to increment until it actually works */ \
/* taken from boost */ \
while (true) { \
m_cpp_type tmp = static_cast<m_cpp_type const volatile &>(*(m_pw)); \
if (tmp == 0) \
return 0; /* if zero, can't add to it anymore */ \
if (m_win_cmpxchg((m_win_type volatile *)(m_pw), tmp + 1, tmp) == tmp) \
return tmp + 1; \
}
// 32-bit MSVC conditional increment: returns 0 if *pw was zero, otherwise the
// incremented value. Body is generated by ATOMIC_CONDITIONAL_INCREMENT_BODY
// using the 32-bit InterlockedCompareExchange primitive.
static _ALWAYS_INLINE_ uint32_t _atomic_conditional_increment_impl(register uint32_t *pw) {
ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONG, InterlockedCompareExchange, uint32_t)
}
static _ALWAYS_INLINE_ uint32_t _atomic_decrement_impl(register uint32_t *pw) {
	// 32-bit atomic decrement through the Win32 interlocked API; new value returned.
	LONG volatile *target = (LONG volatile *)pw;
	return (uint32_t)InterlockedDecrement(target);
}
static _ALWAYS_INLINE_ uint32_t _atomic_increment_impl(register uint32_t *pw) {
	// 32-bit atomic increment through the Win32 interlocked API; new value returned.
	LONG volatile *target = (LONG volatile *)pw;
	return (uint32_t)InterlockedIncrement(target);
}
// 64-bit MSVC conditional increment: returns 0 if *pw was zero, otherwise the
// incremented value. Body is generated by ATOMIC_CONDITIONAL_INCREMENT_BODY
// using the 64-bit InterlockedCompareExchange64 primitive.
static _ALWAYS_INLINE_ uint64_t _atomic_conditional_increment_impl(register uint64_t *pw) {
ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONGLONG, InterlockedCompareExchange64, uint64_t)
}
static _ALWAYS_INLINE_ uint64_t _atomic_decrement_impl(register uint64_t *pw) {
	// 64-bit atomic decrement through the Win32 interlocked API; new value returned.
	LONGLONG volatile *target = (LONGLONG volatile *)pw;
	return (uint64_t)InterlockedDecrement64(target);
}
static _ALWAYS_INLINE_ uint64_t _atomic_increment_impl(register uint64_t *pw) {
	// 64-bit atomic increment through the Win32 interlocked API; new value returned.
	LONGLONG volatile *target = (LONGLONG volatile *)pw;
	return (uint64_t)InterlockedIncrement64(target);
}
#else
//no threads supported?
#error Must provide atomic functions for this platform or compiler!
#endif
#endif
// The actual advertised functions; they'll call the right implementation
// Public 32-bit conditional increment; forwards to the platform-specific
// implementation selected by the preprocessor above. Returns 0 when the
// counter was already zero, otherwise the incremented value.
uint32_t atomic_conditional_increment(register uint32_t *counter) {
return _atomic_conditional_increment_impl(counter);
}
// Public 32-bit atomic decrement; forwards to the platform-specific
// implementation and returns the decremented value.
uint32_t atomic_decrement(register uint32_t *pw) {
return _atomic_decrement_impl(pw);
}
// Public 32-bit atomic increment; forwards to the platform-specific
// implementation and returns the incremented value.
uint32_t atomic_increment(register uint32_t *pw) {
return _atomic_increment_impl(pw);
}
// Public 64-bit overload of atomic_conditional_increment; same contract as
// the 32-bit version (0 means the counter was already zero).
uint64_t atomic_conditional_increment(register uint64_t *counter) {
return _atomic_conditional_increment_impl(counter);
}
// Public 64-bit overload of atomic_decrement; returns the decremented value.
uint64_t atomic_decrement(register uint64_t *pw) {
return _atomic_decrement_impl(pw);
}
// Public 64-bit overload of atomic_increment; returns the incremented value.
uint64_t atomic_increment(register uint64_t *pw) {
return _atomic_increment_impl(pw);
}

View File

@@ -40,6 +40,10 @@ uint32_t atomic_conditional_increment(register uint32_t *counter);
uint32_t atomic_decrement(register uint32_t *pw);
uint32_t atomic_increment(register uint32_t *pw);
uint64_t atomic_conditional_increment(register uint64_t *counter);
uint64_t atomic_decrement(register uint64_t *pw);
uint64_t atomic_increment(register uint64_t *pw);
struct SafeRefCount {
uint32_t count;